Merge pull request #18766 from mikedanese/gcplogs
Add logging driver for Google Cloud Logging
Commit: 3c4d093bab
111 changed files with 26604 additions and 4 deletions
@@ -397,6 +397,7 @@ __docker_complete_log_drivers() {
	awslogs
	etwlogs
	fluentd
	gcplogs
	gelf
	journald
	json-file
@@ -410,13 +411,14 @@ __docker_complete_log_options() {
	# see docs/reference/logging/index.md
	local awslogs_options="awslogs-region awslogs-group awslogs-stream"
	local fluentd_options="env fluentd-address labels tag"
	local gcplogs_options="env gcp-log-cmd gcp-project labels"
	local gelf_options="env gelf-address labels tag"
	local journald_options="env labels tag"
	local json_file_options="env labels max-file max-size"
	local syslog_options="syslog-address syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify syslog-facility tag"
	local splunk_options="env labels splunk-caname splunk-capath splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url tag"

	local all_options="$fluentd_options $gelf_options $journald_options $json_file_options $syslog_options $splunk_options"
	local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $json_file_options $syslog_options $splunk_options"

	case $(__docker_value_of_option --log-driver) in
		'')
@@ -428,6 +430,9 @@ __docker_complete_log_options() {
		fluentd)
			COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) )
			;;
		gcplogs)
			COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) )
			;;
		gelf)
			COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) )
			;;
@@ -201,6 +201,7 @@ __docker_get_log_options() {

	awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream")
	fluentd_options=("env" "fluentd-address" "labels" "tag")
	gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels")
	gelf_options=("env" "gelf-address" "labels" "tag")
	journald_options=("env" "labels")
	json_file_options=("env" "labels" "max-file" "max-size")
@@ -209,6 +210,7 @@ __docker_get_log_options() {

	[[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0
	[[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0
	[[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0
	[[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0
	[[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0
	[[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0
@@ -5,6 +5,7 @@ import (
	// therefore they register themselves to the logdriver factory.
	_ "github.com/docker/docker/daemon/logger/awslogs"
	_ "github.com/docker/docker/daemon/logger/fluentd"
	_ "github.com/docker/docker/daemon/logger/gcplogs"
	_ "github.com/docker/docker/daemon/logger/gelf"
	_ "github.com/docker/docker/daemon/logger/journald"
	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
daemon/logger/gcplogs/gcplogging.go (new file, 181 lines)
@@ -0,0 +1,181 @@
package gcplogs

import (
	"fmt"
	"sync/atomic"
	"time"

	"github.com/docker/docker/daemon/logger"

	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	"google.golang.org/cloud/compute/metadata"
	"google.golang.org/cloud/logging"
)

const (
	name = "gcplogs"

	projectOptKey = "gcp-project"
	logLabelsKey  = "labels"
	logEnvKey     = "env"
	logCmdKey     = "gcp-log-cmd"
)

var (
	// The number of logs the gcplogs driver has dropped.
	droppedLogs uint64

	onGCE = metadata.OnGCE()

	// instance metadata populated from the metadata server if available
	projectID    string
	zone         string
	instanceName string
	instanceID   string
)

func init() {
	if onGCE {
		// These will fail on instances if the metadata service is
		// down or the client is compiled with an API version that
		// has been removed. Since these are not vital, let's ignore
		// them and make their fields in the dockerLogEntry ,omitempty
		projectID, _ = metadata.ProjectID()
		zone, _ = metadata.Zone()
		instanceName, _ = metadata.InstanceName()
		instanceID, _ = metadata.InstanceID()
	}

	if err := logger.RegisterLogDriver(name, New); err != nil {
		logrus.Fatal(err)
	}

	if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil {
		logrus.Fatal(err)
	}
}

type gcplogs struct {
	client    *logging.Client
	instance  *instanceInfo
	container *containerInfo
}

type dockerLogEntry struct {
	Instance  *instanceInfo  `json:"instance,omitempty"`
	Container *containerInfo `json:"container,omitempty"`
	Data      string         `json:"data,omitempty"`
}

type instanceInfo struct {
	Zone string `json:"zone,omitempty"`
	Name string `json:"name,omitempty"`
	ID   string `json:"id,omitempty"`
}

type containerInfo struct {
	Name      string            `json:"name,omitempty"`
	ID        string            `json:"id,omitempty"`
	ImageName string            `json:"imageName,omitempty"`
	ImageID   string            `json:"imageId,omitempty"`
	Created   time.Time         `json:"created,omitempty"`
	Command   string            `json:"command,omitempty"`
	Metadata  map[string]string `json:"metadata,omitempty"`
}

// New creates a new logger that logs to Google Cloud Logging using the application
// default credentials.
//
// See https://developers.google.com/identity/protocols/application-default-credentials
func New(ctx logger.Context) (logger.Logger, error) {

	var project string
	if projectID != "" {
		project = projectID
	}
	if projectID, found := ctx.Config[projectOptKey]; found {
		project = projectID
	}
	if project == "" {
		return nil, fmt.Errorf("No project was specified and couldn't read project from the metadata server. Please specify a project")
	}

	c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver")
	if err != nil {
		return nil, err
	}

	if err := c.Ping(); err != nil {
		return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err)
	}

	l := &gcplogs{
		client: c,
		container: &containerInfo{
			Name:      ctx.ContainerName,
			ID:        ctx.ContainerID,
			ImageName: ctx.ContainerImageName,
			ImageID:   ctx.ContainerImageID,
			Created:   ctx.ContainerCreated,
			Metadata:  ctx.ExtraAttributes(nil),
		},
	}

	if ctx.Config[logCmdKey] == "true" {
		l.container.Command = ctx.Command()
	}

	if onGCE {
		l.instance = &instanceInfo{
			Zone: zone,
			Name: instanceName,
			ID:   instanceID,
		}
	}

	// The logger "overflows" at a rate of 10,000 logs per second and this
	// overflow func is called. We want to surface the error to the user
	// without overly spamming /var/log/docker.log so we log the first time
	// we overflow and every 1000th time after.
	c.Overflow = func(_ *logging.Client, _ logging.Entry) error {
		if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 {
			logrus.Errorf("gcplogs driver has dropped %v logs", i)
		}
		return nil
	}

	return l, nil
}

// ValidateLogOpts validates the opts passed to the gcplogs driver. It rejects
// any option key that the driver does not recognize.
func ValidateLogOpts(cfg map[string]string) error {
	for k := range cfg {
		switch k {
		case projectOptKey, logLabelsKey, logEnvKey, logCmdKey:
		default:
			return fmt.Errorf("%q is not a valid option for the gcplogs driver", k)
		}
	}
	return nil
}

func (l *gcplogs) Log(m *logger.Message) error {
	return l.client.Log(logging.Entry{
		Time: m.Timestamp,
		Payload: &dockerLogEntry{
			Instance:  l.instance,
			Container: l.container,
			Data:      string(m.Line),
		},
	})
}

func (l *gcplogs) Close() error {
	return l.client.Flush()
}

func (l *gcplogs) Name() string {
	return name
}
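For orientation, here is a minimal, hypothetical sketch of how this driver is exercised through Docker's logger API once it has registered itself. The `logger.Context` and `logger.Message` field names and the `gcp-project` option key come from the file above; the `main` wrapper, the project name, and the log line are illustrative assumptions only.

    package main

    import (
    	"time"

    	"github.com/docker/docker/daemon/logger"
    	"github.com/docker/docker/daemon/logger/gcplogs"
    )

    func main() {
    	// Hypothetical context; in the daemon this is built from the container
    	// and its --log-opt values (see the ctx.Config lookups in New above).
    	ctx := logger.Context{
    		Config:        map[string]string{"gcp-project": "my-gcp-project"}, // assumed project name
    		ContainerID:   "0123456789ab",
    		ContainerName: "/web",
    	}

    	l, err := gcplogs.New(ctx)
    	if err != nil {
    		panic(err) // e.g. no project configured and no metadata server reachable
    	}
    	defer l.Close()

    	// Each log line from the container becomes one logging.Entry payload.
    	_ = l.Log(&logger.Message{
    		Line:      []byte("hello from the container"),
    		Timestamp: time.Now(),
    	})
    }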
docs/admin/logging/gcplogs.md (new file, 70 lines)
@@ -0,0 +1,70 @@
<!--[metadata]>
+++
title = "Google Cloud Logging driver"
description = "Describes how to use the Google Cloud Logging driver."
keywords = ["gcplogs, google, docker, logging, driver"]
[menu.main]
parent = "smn_logging"
weight = 2
+++
<![end-metadata]-->

# Google Cloud Logging driver

The Google Cloud Logging driver sends container logs to <a href="https://cloud.google.com/logging/docs/" target="_blank">Google Cloud
Logging</a>.

## Usage

You can configure the default logging driver by passing the `--log-driver`
option to the Docker daemon:

    docker daemon --log-driver=gcplogs

You can set the logging driver for a specific container by using the
`--log-driver` option to `docker run`:

    docker run --log-driver=gcplogs ...

This log driver does not implement a reader so it is incompatible with
`docker logs`.

If Docker detects that it is running in a Google Cloud Project, it discovers
configuration from the <a href="https://cloud.google.com/compute/docs/metadata" target="_blank">instance metadata service</a>.
Otherwise, the user must specify which project to log to using the `gcp-project`
log option, and Docker attempts to obtain credentials from the
<a href="https://developers.google.com/identity/protocols/application-default-credentials" target="_blank">Google Application Default Credential</a>.
The `gcp-project` option takes precedence over information discovered from the metadata server,
so a Docker daemon running in a Google Cloud Project can be overridden to log to a different
Google Cloud Project using `gcp-project`.

## gcplogs options

You can use the `--log-opt NAME=VALUE` flag to specify these additional Google
Cloud Logging driver options:

| Option        | Required | Description                                                                                                                            |
|---------------|----------|----------------------------------------------------------------------------------------------------------------------------------------|
| `gcp-project` | optional | Which GCP project to log to. Defaults to discovering this value from the GCE metadata service.                                          |
| `gcp-log-cmd` | optional | Whether to log the command that the container was started with. Defaults to false.                                                      |
| `labels`      | optional | Comma-separated list of label keys. If the container specifies these labels, their values are included in the message.                  |
| `env`         | optional | Comma-separated list of environment variable keys. If the container specifies these variables, their values are included in the message. |

If there is a collision between `label` and `env` keys, the value of the `env`
takes precedence. Both options add additional fields to the attributes of a
logging message.

Below is an example of the logging options required to log to the default
logging destination, which is discovered by querying the GCE metadata server.

    docker run --log-driver=gcplogs \
        --log-opt labels=location \
        --log-opt env=TEST \
        --log-opt gcp-log-cmd=true \
        --env "TEST=false" \
        --label location=west \
        your/application

This configuration also directs the driver to include in the payload the label
`location`, the environment variable `TEST`, and the command used to start the
container.
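As a rough illustration of the payload shape this produces, the following standalone Go sketch mirrors the `dockerLogEntry` and `containerInfo` types added in `gcplogging.go` above and prints their JSON form; every concrete value in it is invented for the example, not taken from a real run.

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Mirrors the dockerLogEntry / containerInfo types added in this PR.
    type containerInfo struct {
    	Name     string            `json:"name,omitempty"`
    	ID       string            `json:"id,omitempty"`
    	Command  string            `json:"command,omitempty"`
    	Metadata map[string]string `json:"metadata,omitempty"`
    }

    type dockerLogEntry struct {
    	Container *containerInfo `json:"container,omitempty"`
    	Data      string         `json:"data,omitempty"`
    }

    func main() {
    	// Values correspond to the docker run example above (--label location=west,
    	// --env TEST=false, gcp-log-cmd=true); they are illustrative only.
    	entry := dockerLogEntry{
    		Container: &containerInfo{
    			Name:    "/your-application",
    			ID:      "0123456789ab",
    			Command: "/bin/your-app --serve",
    			Metadata: map[string]string{
    				"location": "west",
    				"TEST":     "false",
    			},
    		},
    		Data: "a log line written by the container",
    	}
    	b, _ := json.MarshalIndent(entry, "", "  ")
    	fmt.Println(string(b))
    }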
@@ -27,6 +27,7 @@ container's logging driver. The following options are supported:
| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs. |
| `splunk`  | Splunk logging driver for Docker. Writes log messages to `splunk` using HTTP Event Collector. |
| `etwlogs` | ETW logging driver for Docker on Windows. Writes log messages as ETW events. |
| `gcplogs` | Google Cloud Logging driver for Docker. Writes log messages to Google Cloud Logging. |

The `docker logs` command is available only for the `json-file` and `journald`
logging drivers.
@@ -213,4 +214,14 @@ as an ETW event. An ETW listener can then be created to listen for these events.

For detailed information on working with this logging driver, see [the ETW logging driver](etwlogs.md) reference documentation.

## Google Cloud Logging

The Google Cloud Logging driver supports the following options:

    --log-opt gcp-project=<gcp_project>
    --log-opt labels=<label1>,<label2>
    --log-opt env=<envvar1>,<envvar2>
    --log-opt gcp-log-cmd=true

For detailed information about working with this logging driver, see the
[Google Cloud Logging driver](gcplogs.md) reference documentation.
@@ -82,4 +82,9 @@ clone git gopkg.in/fsnotify.v1 v1.2.0
clone git github.com/aws/aws-sdk-go v0.9.9
clone git github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1

# gcplogs deps
clone git golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be https://github.com/golang/oauth2.git
clone git google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 https://code.googlesource.com/google-api-go-client
clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud

clean
@@ -214,7 +214,7 @@ millions of trillions.
Add link to another container in the form of <name or id>:alias or just
<name or id> in which case the alias will match the name.

**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*none*"
**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
  Logging driver for container. Default is defined by daemon `--log-driver` flag.
  **Warning**: the `docker logs` command works only for the `json-file` and
  `journald` logging drivers.

@@ -185,7 +185,7 @@ unix://[/path/to/socket] to use.
**--label**="[]"
  Set key=value labels to the daemon (displayed in `docker info`)

**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*none*"
**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
  Default driver for container logs. Default is `json-file`.
  **Warning**: `docker logs` command works only for `json-file` logging driver.

@@ -320,7 +320,7 @@ container can access the exposed port via a private networking interface. Docker
will set some environment variables in the client container to help indicate
which interface and port to use.

**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*none*"
**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
  Logging driver for container. Default is defined by daemon `--log-driver` flag.
  **Warning**: the `docker logs` command works only for the `json-file` and
  `journald` logging drivers.
|
18
vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go
vendored
Normal file
18
vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.5
|
||||
|
||||
package ctxhttp
|
||||
|
||||
import "net/http"
|
||||
|
||||
func canceler(client *http.Client, req *http.Request) func() {
|
||||
ch := make(chan struct{})
|
||||
req.Cancel = ch
|
||||
|
||||
return func() {
|
||||
close(ch)
|
||||
}
|
||||
}
|
vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go (new vendored file, 23 lines)
@@ -0,0 +1,23 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.5

package ctxhttp

import "net/http"

type requestCanceler interface {
	CancelRequest(*http.Request)
}

func canceler(client *http.Client, req *http.Request) func() {
	rc, ok := client.Transport.(requestCanceler)
	if !ok {
		return func() {}
	}
	return func() {
		rc.CancelRequest(req)
	}
}
vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go (new vendored file, 79 lines)
@@ -0,0 +1,79 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp"

import (
	"io"
	"net/http"
	"net/url"
	"strings"

	"golang.org/x/net/context"
)

// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)

	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)

	go func() {
		resp, err := client.Do(req)
		result <- responseAndError{resp, err}
	}()

	select {
	case <-ctx.Done():
		cancel()
		return nil, ctx.Err()
	case r := <-result:
		return r.resp, r.err
	}
}

// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", bodyType)
	return Do(ctx, client, req)
}

// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
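A short, hypothetical sketch of how this vendored helper is typically called; `Get` and the `context` package come from the files above, while the URL and the two-second timeout are arbitrary choices for illustration.

    package main

    import (
    	"fmt"
    	"time"

    	"golang.org/x/net/context"
    	"golang.org/x/net/context/ctxhttp"
    )

    func main() {
    	// Cancel the request automatically if it takes longer than two seconds.
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	resp, err := ctxhttp.Get(ctx, nil, "https://www.example.com/") // nil uses http.DefaultClient
    	if err != nil {
    		fmt.Println("request failed or timed out:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("status:", resp.Status)
    }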
vendor/src/golang.org/x/net/http2/.gitignore (new vendored file, 2 lines)
@@ -0,0 +1,2 @@
*~
h2i/h2i
vendor/src/golang.org/x/net/http2/Dockerfile (new vendored file, 44 lines)
@@ -0,0 +1,44 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#

FROM ubuntu:trusty

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y git-core build-essential wget

RUN apt-get install -y --no-install-recommends \
    autotools-dev libtool pkg-config zlib1g-dev \
    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
    automake autoconf

# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER af24f8394e43f4
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git

WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install

WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz
RUN tar -zxvf curl-7.40.0.tar.gz
WORKDIR /root/curl-7.40.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig

CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]
vendor/src/golang.org/x/net/http2/Makefile (new vendored file, 3 lines)
@@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .
vendor/src/golang.org/x/net/http2/README (new vendored file, 20 lines)
@@ -0,0 +1,20 @@
This is a work-in-progress HTTP/2 implementation for Go.

It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.

Status:

* The server support is pretty good. A few things are missing
  but are being worked on.
* The client work has just started but, because it shares a lot of code
  with the server, it is coming along much quicker.

Docs are at https://godoc.org/golang.org/x/net/http2

Demo test server at https://http2.golang.org/

Help & bug reports welcome!

Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+
vendor/src/golang.org/x/net/http2/buffer.go (new vendored file, 76 lines)
@@ -0,0 +1,76 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// buffer is an io.ReadWriteCloser backed by a fixed size buffer.
|
||||
// It never allocates, but moves old data as new data is written.
|
||||
type buffer struct {
|
||||
buf []byte
|
||||
r, w int
|
||||
closed bool
|
||||
err error // err to return to reader
|
||||
}
|
||||
|
||||
var (
|
||||
errReadEmpty = errors.New("read from empty buffer")
|
||||
errWriteClosed = errors.New("write on closed buffer")
|
||||
errWriteFull = errors.New("write on full buffer")
|
||||
)
|
||||
|
||||
// Read copies bytes from the buffer into p.
|
||||
// It is an error to read when no data is available.
|
||||
func (b *buffer) Read(p []byte) (n int, err error) {
|
||||
n = copy(p, b.buf[b.r:b.w])
|
||||
b.r += n
|
||||
if b.closed && b.r == b.w {
|
||||
err = b.err
|
||||
} else if b.r == b.w && n == 0 {
|
||||
err = errReadEmpty
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Len returns the number of bytes of the unread portion of the buffer.
|
||||
func (b *buffer) Len() int {
|
||||
return b.w - b.r
|
||||
}
|
||||
|
||||
// Write copies bytes from p into the buffer.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (b *buffer) Write(p []byte) (n int, err error) {
|
||||
if b.closed {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
|
||||
// Slide existing data to beginning.
|
||||
if b.r > 0 && len(p) > len(b.buf)-b.w {
|
||||
copy(b.buf, b.buf[b.r:b.w])
|
||||
b.w -= b.r
|
||||
b.r = 0
|
||||
}
|
||||
|
||||
// Write new data.
|
||||
n = copy(b.buf[b.w:], p)
|
||||
b.w += n
|
||||
if n < len(p) {
|
||||
err = errWriteFull
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close marks the buffer as closed. Future calls to Write will
|
||||
// return an error. Future calls to Read, once the buffer is
|
||||
// empty, will return err.
|
||||
func (b *buffer) Close(err error) {
|
||||
if !b.closed {
|
||||
b.closed = true
|
||||
b.err = err
|
||||
}
|
||||
}
|
vendor/src/golang.org/x/net/http2/errors.go (new vendored file, 78 lines)
@@ -0,0 +1,78 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import "fmt"
|
||||
|
||||
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
|
||||
type ErrCode uint32
|
||||
|
||||
const (
|
||||
ErrCodeNo ErrCode = 0x0
|
||||
ErrCodeProtocol ErrCode = 0x1
|
||||
ErrCodeInternal ErrCode = 0x2
|
||||
ErrCodeFlowControl ErrCode = 0x3
|
||||
ErrCodeSettingsTimeout ErrCode = 0x4
|
||||
ErrCodeStreamClosed ErrCode = 0x5
|
||||
ErrCodeFrameSize ErrCode = 0x6
|
||||
ErrCodeRefusedStream ErrCode = 0x7
|
||||
ErrCodeCancel ErrCode = 0x8
|
||||
ErrCodeCompression ErrCode = 0x9
|
||||
ErrCodeConnect ErrCode = 0xa
|
||||
ErrCodeEnhanceYourCalm ErrCode = 0xb
|
||||
ErrCodeInadequateSecurity ErrCode = 0xc
|
||||
ErrCodeHTTP11Required ErrCode = 0xd
|
||||
)
|
||||
|
||||
var errCodeName = map[ErrCode]string{
|
||||
ErrCodeNo: "NO_ERROR",
|
||||
ErrCodeProtocol: "PROTOCOL_ERROR",
|
||||
ErrCodeInternal: "INTERNAL_ERROR",
|
||||
ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
|
||||
ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
|
||||
ErrCodeStreamClosed: "STREAM_CLOSED",
|
||||
ErrCodeFrameSize: "FRAME_SIZE_ERROR",
|
||||
ErrCodeRefusedStream: "REFUSED_STREAM",
|
||||
ErrCodeCancel: "CANCEL",
|
||||
ErrCodeCompression: "COMPRESSION_ERROR",
|
||||
ErrCodeConnect: "CONNECT_ERROR",
|
||||
ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
|
||||
ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
|
||||
ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
|
||||
}
|
||||
|
||||
func (e ErrCode) String() string {
|
||||
if s, ok := errCodeName[e]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
|
||||
}
|
||||
|
||||
// ConnectionError is an error that results in the termination of the
|
||||
// entire connection.
|
||||
type ConnectionError ErrCode
|
||||
|
||||
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
|
||||
|
||||
// StreamError is an error that only affects one stream within an
|
||||
// HTTP/2 connection.
|
||||
type StreamError struct {
|
||||
StreamID uint32
|
||||
Code ErrCode
|
||||
}
|
||||
|
||||
func (e StreamError) Error() string {
|
||||
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
|
||||
}
|
||||
|
||||
// 6.9.1 The Flow Control Window
|
||||
// "If a sender receives a WINDOW_UPDATE that causes a flow control
|
||||
// window to exceed this maximum it MUST terminate either the stream
|
||||
// or the connection, as appropriate. For streams, [...]; for the
|
||||
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
|
||||
type goAwayFlowError struct{}
|
||||
|
||||
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
|
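A tiny, hypothetical sketch of how the exported error types defined in errors.go read when printed; the specific code and stream ID are arbitrary.

    package main

    import (
    	"fmt"

    	"golang.org/x/net/http2"
    )

    func main() {
    	// Prints: connection error: FLOW_CONTROL_ERROR
    	fmt.Println(http2.ConnectionError(http2.ErrCodeFlowControl))

    	// Prints: stream error: stream ID 5; CANCEL
    	fmt.Println(http2.StreamError{StreamID: 5, Code: http2.ErrCodeCancel})
    }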
vendor/src/golang.org/x/net/http2/flow.go (new vendored file, 51 lines)
@@ -0,0 +1,51 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
// Flow control
|
||||
|
||||
package http2
|
||||
|
||||
// flow is the flow control window's size.
|
||||
type flow struct {
|
||||
// n is the number of DATA bytes we're allowed to send.
|
||||
// A flow is kept both on a conn and a per-stream.
|
||||
n int32
|
||||
|
||||
// conn points to the shared connection-level flow that is
|
||||
// shared by all streams on that conn. It is nil for the flow
|
||||
// that's on the conn directly.
|
||||
conn *flow
|
||||
}
|
||||
|
||||
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
|
||||
|
||||
func (f *flow) available() int32 {
|
||||
n := f.n
|
||||
if f.conn != nil && f.conn.n < n {
|
||||
n = f.conn.n
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (f *flow) take(n int32) {
|
||||
if n > f.available() {
|
||||
panic("internal error: took too much")
|
||||
}
|
||||
f.n -= n
|
||||
if f.conn != nil {
|
||||
f.conn.n -= n
|
||||
}
|
||||
}
|
||||
|
||||
// add adds n bytes (positive or negative) to the flow control window.
|
||||
// It returns false if the sum would exceed 2^31-1.
|
||||
func (f *flow) add(n int32) bool {
|
||||
remain := (1<<31 - 1) - f.n
|
||||
if n > remain {
|
||||
return false
|
||||
}
|
||||
f.n += n
|
||||
return true
|
||||
}
|
vendor/src/golang.org/x/net/http2/frame.go (new vendored file, 1113 lines; diff suppressed because it is too large)
vendor/src/golang.org/x/net/http2/gotrack.go (new vendored file, 173 lines)
@@ -0,0 +1,173 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
// Defensive debug-only utility to track that functions run on the
|
||||
// goroutine that they're supposed to.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
|
||||
|
||||
type goroutineLock uint64
|
||||
|
||||
func newGoroutineLock() goroutineLock {
|
||||
if !DebugGoroutines {
|
||||
return 0
|
||||
}
|
||||
return goroutineLock(curGoroutineID())
|
||||
}
|
||||
|
||||
func (g goroutineLock) check() {
|
||||
if !DebugGoroutines {
|
||||
return
|
||||
}
|
||||
if curGoroutineID() != uint64(g) {
|
||||
panic("running on the wrong goroutine")
|
||||
}
|
||||
}
|
||||
|
||||
func (g goroutineLock) checkNotOn() {
|
||||
if !DebugGoroutines {
|
||||
return
|
||||
}
|
||||
if curGoroutineID() == uint64(g) {
|
||||
panic("running on the wrong goroutine")
|
||||
}
|
||||
}
|
||||
|
||||
var goroutineSpace = []byte("goroutine ")
|
||||
|
||||
func curGoroutineID() uint64 {
|
||||
bp := littleBuf.Get().(*[]byte)
|
||||
defer littleBuf.Put(bp)
|
||||
b := *bp
|
||||
b = b[:runtime.Stack(b, false)]
|
||||
// Parse the 4707 out of "goroutine 4707 ["
|
||||
b = bytes.TrimPrefix(b, goroutineSpace)
|
||||
i := bytes.IndexByte(b, ' ')
|
||||
if i < 0 {
|
||||
panic(fmt.Sprintf("No space found in %q", b))
|
||||
}
|
||||
b = b[:i]
|
||||
n, err := parseUintBytes(b, 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
var littleBuf = sync.Pool{
|
||||
New: func() interface{} {
|
||||
buf := make([]byte, 64)
|
||||
return &buf
|
||||
},
|
||||
}
|
||||
|
||||
// parseUintBytes is like strconv.ParseUint, but using a []byte.
|
||||
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
|
||||
var cutoff, maxVal uint64
|
||||
|
||||
if bitSize == 0 {
|
||||
bitSize = int(strconv.IntSize)
|
||||
}
|
||||
|
||||
s0 := s
|
||||
switch {
|
||||
case len(s) < 1:
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
|
||||
case 2 <= base && base <= 36:
|
||||
// valid base; nothing to do
|
||||
|
||||
case base == 0:
|
||||
// Look for octal, hex prefix.
|
||||
switch {
|
||||
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
|
||||
base = 16
|
||||
s = s[2:]
|
||||
if len(s) < 1 {
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
case s[0] == '0':
|
||||
base = 8
|
||||
default:
|
||||
base = 10
|
||||
}
|
||||
|
||||
default:
|
||||
err = errors.New("invalid base " + strconv.Itoa(base))
|
||||
goto Error
|
||||
}
|
||||
|
||||
n = 0
|
||||
cutoff = cutoff64(base)
|
||||
maxVal = 1<<uint(bitSize) - 1
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
var v byte
|
||||
d := s[i]
|
||||
switch {
|
||||
case '0' <= d && d <= '9':
|
||||
v = d - '0'
|
||||
case 'a' <= d && d <= 'z':
|
||||
v = d - 'a' + 10
|
||||
case 'A' <= d && d <= 'Z':
|
||||
v = d - 'A' + 10
|
||||
default:
|
||||
n = 0
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
if int(v) >= base {
|
||||
n = 0
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
|
||||
if n >= cutoff {
|
||||
// n*base overflows
|
||||
n = 1<<64 - 1
|
||||
err = strconv.ErrRange
|
||||
goto Error
|
||||
}
|
||||
n *= uint64(base)
|
||||
|
||||
n1 := n + uint64(v)
|
||||
if n1 < n || n1 > maxVal {
|
||||
// n+v overflows
|
||||
n = 1<<64 - 1
|
||||
err = strconv.ErrRange
|
||||
goto Error
|
||||
}
|
||||
n = n1
|
||||
}
|
||||
|
||||
return n, nil
|
||||
|
||||
Error:
|
||||
return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
|
||||
}
|
||||
|
||||
// Return the first number n such that n*base >= 1<<64.
|
||||
func cutoff64(base int) uint64 {
|
||||
if base < 2 {
|
||||
return 0
|
||||
}
|
||||
return (1<<64-1)/uint64(base) + 1
|
||||
}
|
vendor/src/golang.org/x/net/http2/headermap.go (new vendored file, 80 lines)
@@ -0,0 +1,80 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
|
||||
commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
|
||||
)
|
||||
|
||||
func init() {
|
||||
for _, v := range []string{
|
||||
"accept",
|
||||
"accept-charset",
|
||||
"accept-encoding",
|
||||
"accept-language",
|
||||
"accept-ranges",
|
||||
"age",
|
||||
"access-control-allow-origin",
|
||||
"allow",
|
||||
"authorization",
|
||||
"cache-control",
|
||||
"content-disposition",
|
||||
"content-encoding",
|
||||
"content-language",
|
||||
"content-length",
|
||||
"content-location",
|
||||
"content-range",
|
||||
"content-type",
|
||||
"cookie",
|
||||
"date",
|
||||
"etag",
|
||||
"expect",
|
||||
"expires",
|
||||
"from",
|
||||
"host",
|
||||
"if-match",
|
||||
"if-modified-since",
|
||||
"if-none-match",
|
||||
"if-unmodified-since",
|
||||
"last-modified",
|
||||
"link",
|
||||
"location",
|
||||
"max-forwards",
|
||||
"proxy-authenticate",
|
||||
"proxy-authorization",
|
||||
"range",
|
||||
"referer",
|
||||
"refresh",
|
||||
"retry-after",
|
||||
"server",
|
||||
"set-cookie",
|
||||
"strict-transport-security",
|
||||
"transfer-encoding",
|
||||
"user-agent",
|
||||
"vary",
|
||||
"via",
|
||||
"www-authenticate",
|
||||
} {
|
||||
chk := http.CanonicalHeaderKey(v)
|
||||
commonLowerHeader[chk] = v
|
||||
commonCanonHeader[v] = chk
|
||||
}
|
||||
}
|
||||
|
||||
func lowerHeader(v string) string {
|
||||
if s, ok := commonLowerHeader[v]; ok {
|
||||
return s
|
||||
}
|
||||
return strings.ToLower(v)
|
||||
}
|
vendor/src/golang.org/x/net/http2/hpack/encode.go (new vendored file, 252 lines)
@@ -0,0 +1,252 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
uint32Max = ^uint32(0)
|
||||
initialHeaderTableSize = 4096
|
||||
)
|
||||
|
||||
type Encoder struct {
|
||||
dynTab dynamicTable
|
||||
// minSize is the minimum table size set by
|
||||
// SetMaxDynamicTableSize after the previous Header Table Size
|
||||
// Update.
|
||||
minSize uint32
|
||||
// maxSizeLimit is the maximum table size this encoder
|
||||
// supports. This will protect the encoder from too large
|
||||
// size.
|
||||
maxSizeLimit uint32
|
||||
// tableSizeUpdate indicates whether "Header Table Size
|
||||
// Update" is required.
|
||||
tableSizeUpdate bool
|
||||
w io.Writer
|
||||
buf []byte
|
||||
}
|
||||
|
||||
// NewEncoder returns a new Encoder which performs HPACK encoding. An
|
||||
// encoded data is written to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
e := &Encoder{
|
||||
minSize: uint32Max,
|
||||
maxSizeLimit: initialHeaderTableSize,
|
||||
tableSizeUpdate: false,
|
||||
w: w,
|
||||
}
|
||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||
return e
|
||||
}
|
||||
|
||||
// WriteField encodes f into a single Write to e's underlying Writer.
|
||||
// This function may also produce bytes for "Header Table Size Update"
|
||||
// if necessary. If produced, it is done before encoding f.
|
||||
func (e *Encoder) WriteField(f HeaderField) error {
|
||||
e.buf = e.buf[:0]
|
||||
|
||||
if e.tableSizeUpdate {
|
||||
e.tableSizeUpdate = false
|
||||
if e.minSize < e.dynTab.maxSize {
|
||||
e.buf = appendTableSize(e.buf, e.minSize)
|
||||
}
|
||||
e.minSize = uint32Max
|
||||
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
|
||||
}
|
||||
|
||||
idx, nameValueMatch := e.searchTable(f)
|
||||
if nameValueMatch {
|
||||
e.buf = appendIndexed(e.buf, idx)
|
||||
} else {
|
||||
indexing := e.shouldIndex(f)
|
||||
if indexing {
|
||||
e.dynTab.add(f)
|
||||
}
|
||||
|
||||
if idx == 0 {
|
||||
e.buf = appendNewName(e.buf, f, indexing)
|
||||
} else {
|
||||
e.buf = appendIndexedName(e.buf, f, idx, indexing)
|
||||
}
|
||||
}
|
||||
n, err := e.w.Write(e.buf)
|
||||
if err == nil && n != len(e.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// searchTable searches f in both stable and dynamic header tables.
|
||||
// The static header table is searched first. Only when there is no
|
||||
// exact match for both name and value, the dynamic header table is
|
||||
// then searched. If there is no match, i is 0. If both name and value
|
||||
// match, i is the matched index and nameValueMatch becomes true. If
|
||||
// only name matches, i points to that index and nameValueMatch
|
||||
// becomes false.
|
||||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
for idx, hf := range staticTable {
|
||||
if !constantTimeStringCompare(hf.Name, f.Name) {
|
||||
continue
|
||||
}
|
||||
if i == 0 {
|
||||
i = uint64(idx + 1)
|
||||
}
|
||||
if f.Sensitive {
|
||||
continue
|
||||
}
|
||||
if !constantTimeStringCompare(hf.Value, f.Value) {
|
||||
continue
|
||||
}
|
||||
i = uint64(idx + 1)
|
||||
nameValueMatch = true
|
||||
return
|
||||
}
|
||||
|
||||
j, nameValueMatch := e.dynTab.search(f)
|
||||
if nameValueMatch || (i == 0 && j != 0) {
|
||||
i = j + uint64(len(staticTable))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
||||
// The actual size is bounded by the value passed to
|
||||
// SetMaxDynamicTableSizeLimit.
|
||||
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
|
||||
if v > e.maxSizeLimit {
|
||||
v = e.maxSizeLimit
|
||||
}
|
||||
if v < e.minSize {
|
||||
e.minSize = v
|
||||
}
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
|
||||
// specified in SetMaxDynamicTableSize to v. By default, it is set to
|
||||
// 4096, which is the same size of the default dynamic header table
|
||||
// size described in HPACK specification. If the current maximum
|
||||
// dynamic header table size is strictly greater than v, "Header Table
|
||||
// Size Update" will be done in the next WriteField call and the
|
||||
// maximum dynamic header table size is truncated to v.
|
||||
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
|
||||
e.maxSizeLimit = v
|
||||
if e.dynTab.maxSize > v {
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldIndex reports whether f should be indexed.
|
||||
func (e *Encoder) shouldIndex(f HeaderField) bool {
|
||||
return !f.Sensitive && f.size() <= e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendIndexed(dst []byte, i uint64) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, i)
|
||||
dst[first] |= 0x80
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendNewName appends f, as encoded in one of "Literal Header field
|
||||
// - New Name" representation variants, to dst and returns the
|
||||
// extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
|
||||
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
|
||||
dst = appendHpackString(dst, f.Name)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendIndexedName appends f and index i referring indexed name
|
||||
// entry, as encoded in one of "Literal Header field - Indexed Name"
|
||||
// representation variants, to dst and returns the extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
|
||||
first := len(dst)
|
||||
var n byte
|
||||
if indexing {
|
||||
n = 6
|
||||
} else {
|
||||
n = 4
|
||||
}
|
||||
dst = appendVarInt(dst, n, i)
|
||||
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendTableSize appends v, as encoded in "Header Table Size Update"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendTableSize(dst []byte, v uint32) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 5, uint64(v))
|
||||
dst[first] |= 0x20
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendVarInt appends i, as encoded in variable integer form using n
|
||||
// bit prefix, to dst and returns the extended buffer.
|
||||
//
|
||||
// See
|
||||
// http://http2.github.io/http2-spec/compression.html#integer.representation
|
||||
func appendVarInt(dst []byte, n byte, i uint64) []byte {
|
||||
k := uint64((1 << n) - 1)
|
||||
if i < k {
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
dst = append(dst, byte(k))
|
||||
i -= k
|
||||
for ; i >= 128; i >>= 7 {
|
||||
dst = append(dst, byte(0x80|(i&0x7f)))
|
||||
}
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
|
||||
// appendHpackString appends s, as encoded in "String Literal"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
//
|
||||
// s will be encoded in Huffman codes only when it produces strictly
|
||||
// shorter byte string.
|
||||
func appendHpackString(dst []byte, s string) []byte {
|
||||
huffmanLength := HuffmanEncodeLength(s)
|
||||
if huffmanLength < uint64(len(s)) {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, huffmanLength)
|
||||
dst = AppendHuffmanString(dst, s)
|
||||
dst[first] |= 0x80
|
||||
} else {
|
||||
dst = appendVarInt(dst, 7, uint64(len(s)))
|
||||
dst = append(dst, s...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// encodeTypeByte returns type byte. If sensitive is true, type byte
|
||||
// for "Never Indexed" representation is returned. If sensitive is
|
||||
// false and indexing is true, type byte for "Incremental Indexing"
|
||||
// representation is returned. Otherwise, type byte for "Without
|
||||
// Indexing" is returned.
|
||||
func encodeTypeByte(indexing, sensitive bool) byte {
|
||||
if sensitive {
|
||||
return 0x10
|
||||
}
|
||||
if indexing {
|
||||
return 0x40
|
||||
}
|
||||
return 0
|
||||
}
|
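For context on how this vendored HPACK package is used, here is a small, hypothetical round-trip sketch built on the exported Encoder above and the Decoder defined in hpack.go below; the header names and values are illustrative only.

    package main

    import (
    	"bytes"
    	"fmt"

    	"golang.org/x/net/http2/hpack"
    )

    func main() {
    	// Encode two header fields into an HPACK header block.
    	var buf bytes.Buffer
    	enc := hpack.NewEncoder(&buf)
    	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
    	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "example"})

    	// Decode the block back into header fields.
    	dec := hpack.NewDecoder(4096, nil) // 4096 matches the initial table size above
    	fields, err := dec.DecodeFull(buf.Bytes())
    	if err != nil {
    		panic(err)
    	}
    	for _, f := range fields {
    		fmt.Printf("%s: %s\n", f.Name, f.Value)
    	}
    }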
vendor/src/golang.org/x/net/http2/hpack/hpack.go (new vendored file, 445 lines)
@@ -0,0 +1,445 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
// Package hpack implements HPACK, a compression format for
|
||||
// efficiently representing HTTP header fields in the context of HTTP/2.
|
||||
//
|
||||
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A DecodingError is something the spec defines as a decoding error.
|
||||
type DecodingError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (de DecodingError) Error() string {
|
||||
return fmt.Sprintf("decoding error: %v", de.Err)
|
||||
}
|
||||
|
||||
// An InvalidIndexError is returned when an encoder references a table
|
||||
// entry before the static table or after the end of the dynamic table.
|
||||
type InvalidIndexError int
|
||||
|
||||
func (e InvalidIndexError) Error() string {
|
||||
return fmt.Sprintf("invalid indexed representation index %d", int(e))
|
||||
}
|
||||
|
||||
// A HeaderField is a name-value pair. Both the name and value are
|
||||
// treated as opaque sequences of octets.
|
||||
type HeaderField struct {
|
||||
Name, Value string
|
||||
|
||||
// Sensitive means that this header field should never be
|
||||
// indexed.
|
||||
Sensitive bool
|
||||
}
|
||||
|
||||
func (hf *HeaderField) size() uint32 {
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
|
||||
// "The size of the dynamic table is the sum of the size of
|
||||
// its entries. The size of an entry is the sum of its name's
|
||||
// length in octets (as defined in Section 5.2), its value's
|
||||
// length in octets (see Section 5.2), plus 32. The size of
|
||||
// an entry is calculated using the length of the name and
|
||||
// value without any Huffman encoding applied."
|
||||
|
||||
// This can overflow if somebody makes a large HeaderField
|
||||
// Name and/or Value by hand, but we don't care, because that
|
||||
// won't happen on the wire because the encoding doesn't allow
|
||||
// it.
|
||||
return uint32(len(hf.Name) + len(hf.Value) + 32)
|
||||
}
|
||||
|
||||
// A Decoder is the decoding context for incremental processing of
|
||||
// header blocks.
|
||||
type Decoder struct {
|
||||
dynTab dynamicTable
|
||||
emit func(f HeaderField)
|
||||
|
||||
// buf is the unparsed buffer. It's only written to
|
||||
// saveBuf if it was truncated in the middle of a header
|
||||
// block. Because it's usually not owned, we can only
|
||||
// process it under Write.
|
||||
buf []byte // usually not owned
|
||||
saveBuf bytes.Buffer
|
||||
}
|
||||
|
||||
func NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder {
|
||||
d := &Decoder{
|
||||
emit: emitFunc,
|
||||
}
|
||||
d.dynTab.allowedMaxSize = maxSize
|
||||
d.dynTab.setMaxSize(maxSize)
|
||||
return d
|
||||
}
|
||||
|
||||
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
|
||||
// underlying buffers for garbage reasons.
|
||||
|
||||
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
|
||||
// stream (via dynamic table size updates) may set the maximum size
|
||||
// to.
|
||||
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.allowedMaxSize = v
|
||||
}
|
||||
|
||||
type dynamicTable struct {
|
||||
// ents is the FIFO described at
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
|
||||
// The newest (low index) is appended at the end, and items are
|
||||
// evicted from the front.
|
||||
ents []HeaderField
|
||||
size uint32
|
||||
maxSize uint32 // current maxSize
|
||||
allowedMaxSize uint32 // maxSize may go up to this, inclusive
|
||||
}
|
||||
|
||||
func (dt *dynamicTable) setMaxSize(v uint32) {
|
||||
dt.maxSize = v
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
// TODO: change dynamicTable to be a struct with a slice and a size int field,
|
||||
// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
|
||||
//
|
||||
//
|
||||
// Then make add increment the size. maybe the max size should move from Decoder to
|
||||
// dynamicTable and add should return an ok bool if there was enough space.
|
||||
//
|
||||
// Later we'll need a remove operation on dynamicTable.
|
||||
|
||||
func (dt *dynamicTable) add(f HeaderField) {
|
||||
dt.ents = append(dt.ents, f)
|
||||
dt.size += f.size()
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
// If we're too big, evict old stuff (front of the slice)
|
||||
func (dt *dynamicTable) evict() {
|
||||
base := dt.ents // keep base pointer of slice
|
||||
for dt.size > dt.maxSize {
|
||||
dt.size -= dt.ents[0].size()
|
||||
dt.ents = dt.ents[1:]
|
||||
}
|
||||
|
||||
// Shift slice contents down if we evicted things.
|
||||
if len(dt.ents) != len(base) {
|
||||
copy(base, dt.ents)
|
||||
dt.ents = base[:len(dt.ents)]
|
||||
}
|
||||
}
|
||||
|
||||
// constantTimeStringCompare compares string a and b in a constant
|
||||
// time manner.
|
||||
func constantTimeStringCompare(a, b string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
c := byte(0)
|
||||
|
||||
for i := 0; i < len(a); i++ {
|
||||
c |= a[i] ^ b[i]
|
||||
}
|
||||
|
||||
return c == 0
|
||||
}
|
||||
|
||||
// Search searches f in the table. The return value i is 0 if there is
|
||||
// no name match. If there is name match or name/value match, i is the
|
||||
// index of that entry (1-based). If both name and value match,
|
||||
// nameValueMatch becomes true.
|
||||
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
l := len(dt.ents)
|
||||
for j := l - 1; j >= 0; j-- {
|
||||
ent := dt.ents[j]
|
||||
if !constantTimeStringCompare(ent.Name, f.Name) {
|
||||
continue
|
||||
}
|
||||
if i == 0 {
|
||||
i = uint64(l - j)
|
||||
}
|
||||
if f.Sensitive {
|
||||
continue
|
||||
}
|
||||
if !constantTimeStringCompare(ent.Value, f.Value) {
|
||||
continue
|
||||
}
|
||||
i = uint64(l - j)
|
||||
nameValueMatch = true
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *Decoder) maxTableIndex() int {
|
||||
return len(d.dynTab.ents) + len(staticTable)
|
||||
}
|
||||
|
||||
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
|
||||
if i < 1 {
|
||||
return
|
||||
}
|
||||
if i > uint64(d.maxTableIndex()) {
|
||||
return
|
||||
}
|
||||
if i <= uint64(len(staticTable)) {
|
||||
return staticTable[i-1], true
|
||||
}
|
||||
dents := d.dynTab.ents
|
||||
return dents[len(dents)-(int(i)-len(staticTable))], true
|
||||
}
|
||||
|
||||
// Decode decodes an entire block.
|
||||
//
|
||||
// TODO: remove this method and make it incremental later? This is
|
||||
// easier for debugging now.
|
||||
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
|
||||
var hf []HeaderField
|
||||
saveFunc := d.emit
|
||||
defer func() { d.emit = saveFunc }()
|
||||
d.emit = func(f HeaderField) { hf = append(hf, f) }
|
||||
if _, err := d.Write(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := d.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hf, nil
|
||||
}
|
||||
|
||||
func (d *Decoder) Close() error {
|
||||
if d.saveBuf.Len() > 0 {
|
||||
d.saveBuf.Reset()
|
||||
return DecodingError{errors.New("truncated headers")}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) Write(p []byte) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
// Prevent state machine CPU attacks (making us redo
|
||||
// work up to the point of finding out we don't have
|
||||
// enough data)
|
||||
return
|
||||
}
|
||||
// Only copy the data if we have to. Optimistically assume
|
||||
// that p will contain a complete header block.
|
||||
if d.saveBuf.Len() == 0 {
|
||||
d.buf = p
|
||||
} else {
|
||||
d.saveBuf.Write(p)
|
||||
d.buf = d.saveBuf.Bytes()
|
||||
d.saveBuf.Reset()
|
||||
}
|
||||
|
||||
for len(d.buf) > 0 {
|
||||
err = d.parseHeaderFieldRepr()
|
||||
if err != nil {
|
||||
if err == errNeedMore {
|
||||
err = nil
|
||||
d.saveBuf.Write(d.buf)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return len(p), err
|
||||
}
|
||||
|
||||
// errNeedMore is an internal sentinel error value that means the
|
||||
// buffer is truncated and we need to read more data before we can
|
||||
// continue parsing.
|
||||
var errNeedMore = errors.New("need more data")
|
||||
|
||||
type indexType int
|
||||
|
||||
const (
|
||||
indexedTrue indexType = iota
|
||||
indexedFalse
|
||||
indexedNever
|
||||
)
|
||||
|
||||
func (v indexType) indexed() bool { return v == indexedTrue }
|
||||
func (v indexType) sensitive() bool { return v == indexedNever }
|
||||
|
||||
// returns errNeedMore if there isn't enough data available.
|
||||
// any other error is fatal.
|
||||
// consumes d.buf iff it returns nil.
|
||||
// precondition: must be called with len(d.buf) > 0
|
||||
func (d *Decoder) parseHeaderFieldRepr() error {
|
||||
b := d.buf[0]
|
||||
switch {
|
||||
case b&128 != 0:
|
||||
// Indexed representation.
|
||||
// High bit set?
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
|
||||
return d.parseFieldIndexed()
|
||||
case b&192 == 64:
|
||||
// 6.2.1 Literal Header Field with Incremental Indexing
|
||||
// 0b10xxxxxx: top two bits are 10
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
|
||||
return d.parseFieldLiteral(6, indexedTrue)
|
||||
case b&240 == 0:
|
||||
// 6.2.2 Literal Header Field without Indexing
|
||||
// 0b0000xxxx: top four bits are 0000
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
|
||||
return d.parseFieldLiteral(4, indexedFalse)
|
||||
case b&240 == 16:
|
||||
// 6.2.3 Literal Header Field never Indexed
|
||||
// 0b0001xxxx: top four bits are 0001
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
|
||||
return d.parseFieldLiteral(4, indexedNever)
|
||||
case b&224 == 32:
|
||||
// 6.3 Dynamic Table Size Update
|
||||
// Top three bits are '001'.
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
|
||||
return d.parseDynamicTableSizeUpdate()
|
||||
}
|
||||
|
||||
return DecodingError{errors.New("invalid encoding")}
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldIndexed() error {
|
||||
buf := d.buf
|
||||
idx, buf, err := readVarInt(7, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hf, ok := d.at(idx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(idx)}
|
||||
}
|
||||
d.emit(HeaderField{Name: hf.Name, Value: hf.Value})
|
||||
d.buf = buf
|
||||
return nil
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
|
||||
buf := d.buf
|
||||
nameIdx, buf, err := readVarInt(n, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var hf HeaderField
|
||||
if nameIdx > 0 {
|
||||
ihf, ok := d.at(nameIdx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(nameIdx)}
|
||||
}
|
||||
hf.Name = ihf.Name
|
||||
} else {
|
||||
hf.Name, buf, err = readString(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
hf.Value, buf, err = readString(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.buf = buf
|
||||
if it.indexed() {
|
||||
d.dynTab.add(hf)
|
||||
}
|
||||
hf.Sensitive = it.sensitive()
|
||||
d.emit(hf)
|
||||
return nil
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseDynamicTableSizeUpdate() error {
|
||||
buf := d.buf
|
||||
size, buf, err := readVarInt(5, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size > uint64(d.dynTab.allowedMaxSize) {
|
||||
return DecodingError{errors.New("dynamic table size update too large")}
|
||||
}
|
||||
d.dynTab.setMaxSize(uint32(size))
|
||||
d.buf = buf
|
||||
return nil
|
||||
}
|
||||
|
||||
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
|
||||
|
||||
// readVarInt reads an unsigned variable length integer off the
|
||||
// beginning of p. n is the parameter as described in
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
|
||||
//
|
||||
// n must always be between 1 and 8.
|
||||
//
|
||||
// The returned remain buffer is either a smaller suffix of p, or err != nil.
|
||||
// The error is errNeedMore if p doesn't contain a complete integer.
|
||||
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
|
||||
if n < 1 || n > 8 {
|
||||
panic("bad n")
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, p, errNeedMore
|
||||
}
|
||||
i = uint64(p[0])
|
||||
if n < 8 {
|
||||
i &= (1 << uint64(n)) - 1
|
||||
}
|
||||
if i < (1<<uint64(n))-1 {
|
||||
return i, p[1:], nil
|
||||
}
|
||||
|
||||
origP := p
|
||||
p = p[1:]
|
||||
var m uint64
|
||||
for len(p) > 0 {
|
||||
b := p[0]
|
||||
p = p[1:]
|
||||
i += uint64(b&127) << m
|
||||
if b&128 == 0 {
|
||||
return i, p, nil
|
||||
}
|
||||
m += 7
|
||||
if m >= 63 { // TODO: proper overflow check. making this up.
|
||||
return 0, origP, errVarintOverflow
|
||||
}
|
||||
}
|
||||
return 0, origP, errNeedMore
|
||||
}
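readVarInt above parses the prefixed integers of RFC 7541 section 5.1; for reference, the encoding direction is equally small. The following is a minimal standalone sketch (appendVarInt is a hypothetical name, not part of this package), ending with the RFC's worked example of 1337 under a 5-bit prefix:

package main

import "fmt"

// appendVarInt is a hypothetical sketch of the encoder side of the
// prefix-integer scheme that readVarInt above parses. The representation's
// flag bits would normally be OR'd into the first byte by the caller.
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	k := uint64(1)<<n - 1 // largest value that fits in the n-bit prefix
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for i >= 128 {
		dst = append(dst, byte(i&127)|128)
		i >>= 7
	}
	return append(dst, byte(i))
}

func main() {
	// RFC 7541 worked example: 1337 with a 5-bit prefix.
	fmt.Printf("% x\n", appendVarInt(nil, 5, 1337)) // 1f 9a 0a
}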
|
||||
|
||||
func readString(p []byte) (s string, remain []byte, err error) {
|
||||
if len(p) == 0 {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
isHuff := p[0]&128 != 0
|
||||
strLen, p, err := readVarInt(7, p)
|
||||
if err != nil {
|
||||
return "", p, err
|
||||
}
|
||||
if uint64(len(p)) < strLen {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
if !isHuff {
|
||||
return string(p[:strLen]), p[strLen:], nil
|
||||
}
|
||||
|
||||
// TODO: optimize this garbage:
|
||||
var buf bytes.Buffer
|
||||
if _, err := HuffmanDecode(&buf, p[:strLen]); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return buf.String(), p[strLen:], nil
|
||||
}
|
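To see the exported pieces above working together, here is a minimal round trip through hpack.NewEncoder, WriteField and DecodeFull (a sketch; the header values are placeholders, and 4096 mirrors the default header table size used elsewhere in this change):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "demo"})

	// DecodeFull decodes the whole block at once, as documented above.
	dec := hpack.NewDecoder(4096, nil)
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}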
159
vendor/src/golang.org/x/net/http2/hpack/huffman.go
vendored
Normal file
@@ -0,0 +1,159 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} { return new(bytes.Buffer) },
|
||||
}
|
||||
|
||||
// HuffmanDecode decodes the string in v and writes the expanded
|
||||
// result to w, returning the number of bytes written to w and the
|
||||
// Write call's return value. At most one Write call is made.
|
||||
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
|
||||
n := rootHuffmanNode
|
||||
cur, nbits := uint(0), uint8(0)
|
||||
for _, b := range v {
|
||||
cur = cur<<8 | uint(b)
|
||||
nbits += 8
|
||||
for nbits >= 8 {
|
||||
n = n.children[byte(cur>>(nbits-8))]
|
||||
if n.children == nil {
|
||||
buf.WriteByte(n.sym)
|
||||
nbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
} else {
|
||||
nbits -= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
for nbits > 0 {
|
||||
n = n.children[byte(cur<<(8-nbits))]
|
||||
if n.children != nil || n.codeLen > nbits {
|
||||
break
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
nbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
type node struct {
|
||||
// children is non-nil for internal nodes
|
||||
children []*node
|
||||
|
||||
// The following are only valid if children is nil:
|
||||
codeLen uint8 // number of bits that led to the output of sym
|
||||
sym byte // output symbol
|
||||
}
|
||||
|
||||
func newInternalNode() *node {
|
||||
return &node{children: make([]*node, 256)}
|
||||
}
|
||||
|
||||
var rootHuffmanNode = newInternalNode()
|
||||
|
||||
func init() {
|
||||
for i, code := range huffmanCodes {
|
||||
if i > 255 {
|
||||
panic("too many huffman codes")
|
||||
}
|
||||
addDecoderNode(byte(i), code, huffmanCodeLen[i])
|
||||
}
|
||||
}
|
||||
|
||||
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
|
||||
cur := rootHuffmanNode
|
||||
for codeLen > 8 {
|
||||
codeLen -= 8
|
||||
i := uint8(code >> codeLen)
|
||||
if cur.children[i] == nil {
|
||||
cur.children[i] = newInternalNode()
|
||||
}
|
||||
cur = cur.children[i]
|
||||
}
|
||||
shift := 8 - codeLen
|
||||
start, end := int(uint8(code<<shift)), int(1<<shift)
|
||||
for i := start; i < start+end; i++ {
|
||||
cur.children[i] = &node{sym: sym, codeLen: codeLen}
|
||||
}
|
||||
}
|
||||
|
||||
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
|
||||
// and returns the extended buffer.
|
||||
func AppendHuffmanString(dst []byte, s string) []byte {
|
||||
rembits := uint8(8)
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
if rembits == 8 {
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
|
||||
}
|
||||
|
||||
if rembits < 8 {
|
||||
// special EOS symbol
|
||||
code := uint32(0x3fffffff)
|
||||
nbits := uint8(30)
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// HuffmanEncodeLength returns the number of bytes required to encode
|
||||
// s in Huffman codes. The result is rounded up to a byte boundary.
|
||||
func HuffmanEncodeLength(s string) uint64 {
|
||||
n := uint64(0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
n += uint64(huffmanCodeLen[s[i]])
|
||||
}
|
||||
return (n + 7) / 8
|
||||
}
|
||||
|
||||
// appendByteToHuffmanCode appends Huffman code for c to dst and
|
||||
// returns the extended buffer and the remaining bits in the last
|
||||
// element. The appending is not byte aligned and the remaining bits
|
||||
// in the last element of dst are given in rembits.
|
||||
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
|
||||
code := huffmanCodes[c]
|
||||
nbits := huffmanCodeLen[c]
|
||||
|
||||
for {
|
||||
if rembits > nbits {
|
||||
t := uint8(code << (rembits - nbits))
|
||||
dst[len(dst)-1] |= t
|
||||
rembits -= nbits
|
||||
break
|
||||
}
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
|
||||
nbits -= rembits
|
||||
rembits = 8
|
||||
|
||||
if nbits == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
|
||||
return dst, rembits
|
||||
}
|
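A short sketch exercising the exported Huffman helpers above, encoding a string, comparing its Huffman length with its raw length, and decoding it back:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"
	enc := hpack.AppendHuffmanString(nil, s)
	fmt.Println(len(s), "raw bytes,", hpack.HuffmanEncodeLength(s), "Huffman bytes")

	var out bytes.Buffer
	if _, err := hpack.HuffmanDecode(&out, enc); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // www.example.com
}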
353
vendor/src/golang.org/x/net/http2/hpack/tables.go
vendored
Normal file
@@ -0,0 +1,353 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package hpack
|
||||
|
||||
func pair(name, value string) HeaderField {
|
||||
return HeaderField{Name: name, Value: value}
|
||||
}
|
||||
|
||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||
var staticTable = []HeaderField{
|
||||
pair(":authority", ""), // index 1 (1-based)
|
||||
pair(":method", "GET"),
|
||||
pair(":method", "POST"),
|
||||
pair(":path", "/"),
|
||||
pair(":path", "/index.html"),
|
||||
pair(":scheme", "http"),
|
||||
pair(":scheme", "https"),
|
||||
pair(":status", "200"),
|
||||
pair(":status", "204"),
|
||||
pair(":status", "206"),
|
||||
pair(":status", "304"),
|
||||
pair(":status", "400"),
|
||||
pair(":status", "404"),
|
||||
pair(":status", "500"),
|
||||
pair("accept-charset", ""),
|
||||
pair("accept-encoding", "gzip, deflate"),
|
||||
pair("accept-language", ""),
|
||||
pair("accept-ranges", ""),
|
||||
pair("accept", ""),
|
||||
pair("access-control-allow-origin", ""),
|
||||
pair("age", ""),
|
||||
pair("allow", ""),
|
||||
pair("authorization", ""),
|
||||
pair("cache-control", ""),
|
||||
pair("content-disposition", ""),
|
||||
pair("content-encoding", ""),
|
||||
pair("content-language", ""),
|
||||
pair("content-length", ""),
|
||||
pair("content-location", ""),
|
||||
pair("content-range", ""),
|
||||
pair("content-type", ""),
|
||||
pair("cookie", ""),
|
||||
pair("date", ""),
|
||||
pair("etag", ""),
|
||||
pair("expect", ""),
|
||||
pair("expires", ""),
|
||||
pair("from", ""),
|
||||
pair("host", ""),
|
||||
pair("if-match", ""),
|
||||
pair("if-modified-since", ""),
|
||||
pair("if-none-match", ""),
|
||||
pair("if-range", ""),
|
||||
pair("if-unmodified-since", ""),
|
||||
pair("last-modified", ""),
|
||||
pair("link", ""),
|
||||
pair("location", ""),
|
||||
pair("max-forwards", ""),
|
||||
pair("proxy-authenticate", ""),
|
||||
pair("proxy-authorization", ""),
|
||||
pair("range", ""),
|
||||
pair("referer", ""),
|
||||
pair("refresh", ""),
|
||||
pair("retry-after", ""),
|
||||
pair("server", ""),
|
||||
pair("set-cookie", ""),
|
||||
pair("strict-transport-security", ""),
|
||||
pair("transfer-encoding", ""),
|
||||
pair("user-agent", ""),
|
||||
pair("vary", ""),
|
||||
pair("via", ""),
|
||||
pair("www-authenticate", ""),
|
||||
}
|
||||
|
||||
var huffmanCodes = []uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
0xfffffe2,
|
||||
0xfffffe3,
|
||||
0xfffffe4,
|
||||
0xfffffe5,
|
||||
0xfffffe6,
|
||||
0xfffffe7,
|
||||
0xfffffe8,
|
||||
0xffffea,
|
||||
0x3ffffffc,
|
||||
0xfffffe9,
|
||||
0xfffffea,
|
||||
0x3ffffffd,
|
||||
0xfffffeb,
|
||||
0xfffffec,
|
||||
0xfffffed,
|
||||
0xfffffee,
|
||||
0xfffffef,
|
||||
0xffffff0,
|
||||
0xffffff1,
|
||||
0xffffff2,
|
||||
0x3ffffffe,
|
||||
0xffffff3,
|
||||
0xffffff4,
|
||||
0xffffff5,
|
||||
0xffffff6,
|
||||
0xffffff7,
|
||||
0xffffff8,
|
||||
0xffffff9,
|
||||
0xffffffa,
|
||||
0xffffffb,
|
||||
0x14,
|
||||
0x3f8,
|
||||
0x3f9,
|
||||
0xffa,
|
||||
0x1ff9,
|
||||
0x15,
|
||||
0xf8,
|
||||
0x7fa,
|
||||
0x3fa,
|
||||
0x3fb,
|
||||
0xf9,
|
||||
0x7fb,
|
||||
0xfa,
|
||||
0x16,
|
||||
0x17,
|
||||
0x18,
|
||||
0x0,
|
||||
0x1,
|
||||
0x2,
|
||||
0x19,
|
||||
0x1a,
|
||||
0x1b,
|
||||
0x1c,
|
||||
0x1d,
|
||||
0x1e,
|
||||
0x1f,
|
||||
0x5c,
|
||||
0xfb,
|
||||
0x7ffc,
|
||||
0x20,
|
||||
0xffb,
|
||||
0x3fc,
|
||||
0x1ffa,
|
||||
0x21,
|
||||
0x5d,
|
||||
0x5e,
|
||||
0x5f,
|
||||
0x60,
|
||||
0x61,
|
||||
0x62,
|
||||
0x63,
|
||||
0x64,
|
||||
0x65,
|
||||
0x66,
|
||||
0x67,
|
||||
0x68,
|
||||
0x69,
|
||||
0x6a,
|
||||
0x6b,
|
||||
0x6c,
|
||||
0x6d,
|
||||
0x6e,
|
||||
0x6f,
|
||||
0x70,
|
||||
0x71,
|
||||
0x72,
|
||||
0xfc,
|
||||
0x73,
|
||||
0xfd,
|
||||
0x1ffb,
|
||||
0x7fff0,
|
||||
0x1ffc,
|
||||
0x3ffc,
|
||||
0x22,
|
||||
0x7ffd,
|
||||
0x3,
|
||||
0x23,
|
||||
0x4,
|
||||
0x24,
|
||||
0x5,
|
||||
0x25,
|
||||
0x26,
|
||||
0x27,
|
||||
0x6,
|
||||
0x74,
|
||||
0x75,
|
||||
0x28,
|
||||
0x29,
|
||||
0x2a,
|
||||
0x7,
|
||||
0x2b,
|
||||
0x76,
|
||||
0x2c,
|
||||
0x8,
|
||||
0x9,
|
||||
0x2d,
|
||||
0x77,
|
||||
0x78,
|
||||
0x79,
|
||||
0x7a,
|
||||
0x7b,
|
||||
0x7ffe,
|
||||
0x7fc,
|
||||
0x3ffd,
|
||||
0x1ffd,
|
||||
0xffffffc,
|
||||
0xfffe6,
|
||||
0x3fffd2,
|
||||
0xfffe7,
|
||||
0xfffe8,
|
||||
0x3fffd3,
|
||||
0x3fffd4,
|
||||
0x3fffd5,
|
||||
0x7fffd9,
|
||||
0x3fffd6,
|
||||
0x7fffda,
|
||||
0x7fffdb,
|
||||
0x7fffdc,
|
||||
0x7fffdd,
|
||||
0x7fffde,
|
||||
0xffffeb,
|
||||
0x7fffdf,
|
||||
0xffffec,
|
||||
0xffffed,
|
||||
0x3fffd7,
|
||||
0x7fffe0,
|
||||
0xffffee,
|
||||
0x7fffe1,
|
||||
0x7fffe2,
|
||||
0x7fffe3,
|
||||
0x7fffe4,
|
||||
0x1fffdc,
|
||||
0x3fffd8,
|
||||
0x7fffe5,
|
||||
0x3fffd9,
|
||||
0x7fffe6,
|
||||
0x7fffe7,
|
||||
0xffffef,
|
||||
0x3fffda,
|
||||
0x1fffdd,
|
||||
0xfffe9,
|
||||
0x3fffdb,
|
||||
0x3fffdc,
|
||||
0x7fffe8,
|
||||
0x7fffe9,
|
||||
0x1fffde,
|
||||
0x7fffea,
|
||||
0x3fffdd,
|
||||
0x3fffde,
|
||||
0xfffff0,
|
||||
0x1fffdf,
|
||||
0x3fffdf,
|
||||
0x7fffeb,
|
||||
0x7fffec,
|
||||
0x1fffe0,
|
||||
0x1fffe1,
|
||||
0x3fffe0,
|
||||
0x1fffe2,
|
||||
0x7fffed,
|
||||
0x3fffe1,
|
||||
0x7fffee,
|
||||
0x7fffef,
|
||||
0xfffea,
|
||||
0x3fffe2,
|
||||
0x3fffe3,
|
||||
0x3fffe4,
|
||||
0x7ffff0,
|
||||
0x3fffe5,
|
||||
0x3fffe6,
|
||||
0x7ffff1,
|
||||
0x3ffffe0,
|
||||
0x3ffffe1,
|
||||
0xfffeb,
|
||||
0x7fff1,
|
||||
0x3fffe7,
|
||||
0x7ffff2,
|
||||
0x3fffe8,
|
||||
0x1ffffec,
|
||||
0x3ffffe2,
|
||||
0x3ffffe3,
|
||||
0x3ffffe4,
|
||||
0x7ffffde,
|
||||
0x7ffffdf,
|
||||
0x3ffffe5,
|
||||
0xfffff1,
|
||||
0x1ffffed,
|
||||
0x7fff2,
|
||||
0x1fffe3,
|
||||
0x3ffffe6,
|
||||
0x7ffffe0,
|
||||
0x7ffffe1,
|
||||
0x3ffffe7,
|
||||
0x7ffffe2,
|
||||
0xfffff2,
|
||||
0x1fffe4,
|
||||
0x1fffe5,
|
||||
0x3ffffe8,
|
||||
0x3ffffe9,
|
||||
0xffffffd,
|
||||
0x7ffffe3,
|
||||
0x7ffffe4,
|
||||
0x7ffffe5,
|
||||
0xfffec,
|
||||
0xfffff3,
|
||||
0xfffed,
|
||||
0x1fffe6,
|
||||
0x3fffe9,
|
||||
0x1fffe7,
|
||||
0x1fffe8,
|
||||
0x7ffff3,
|
||||
0x3fffea,
|
||||
0x3fffeb,
|
||||
0x1ffffee,
|
||||
0x1ffffef,
|
||||
0xfffff4,
|
||||
0xfffff5,
|
||||
0x3ffffea,
|
||||
0x7ffff4,
|
||||
0x3ffffeb,
|
||||
0x7ffffe6,
|
||||
0x3ffffec,
|
||||
0x3ffffed,
|
||||
0x7ffffe7,
|
||||
0x7ffffe8,
|
||||
0x7ffffe9,
|
||||
0x7ffffea,
|
||||
0x7ffffeb,
|
||||
0xffffffe,
|
||||
0x7ffffec,
|
||||
0x7ffffed,
|
||||
0x7ffffee,
|
||||
0x7ffffef,
|
||||
0x7fffff0,
|
||||
0x3ffffee,
|
||||
}
|
||||
|
||||
var huffmanCodeLen = []uint8{
|
||||
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
|
||||
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
|
||||
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
|
||||
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
|
||||
13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
|
||||
7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
|
||||
15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
|
||||
6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
|
||||
20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
|
||||
24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
|
||||
22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
|
||||
21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
|
||||
26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
|
||||
19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
|
||||
20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
|
||||
26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
|
||||
}
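Since the static table above is 1-based and its second entry is :method: GET, a single indexed byte is enough to carry that field. A minimal sketch of decoding it with this package's Decoder:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// 0x82 = indexed header field (high bit set) with index 2,
	// which the static table above maps to ":method: GET".
	dec := hpack.NewDecoder(4096, nil)
	fields, err := dec.DecodeFull([]byte{0x82})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %s\n", fields[0].Name, fields[0].Value) // :method: GET
}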
|
249
vendor/src/golang.org/x/net/http2/http2.go
vendored
Normal file
@@ -0,0 +1,249 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
// Package http2 implements the HTTP/2 protocol.
|
||||
//
|
||||
// This is a work in progress. This package is low-level and intended
|
||||
// to be used directly by very few people. Most users will use it
|
||||
// indirectly through integration with the net/http package. See
|
||||
// ConfigureServer. That ConfigureServer call will likely be automatic
|
||||
// or available via an empty import in the future.
|
||||
//
|
||||
// See http://http2.github.io/
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var VerboseLogs = false
|
||||
|
||||
const (
|
||||
// ClientPreface is the string that must be sent by new
|
||||
// connections from clients.
|
||||
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
|
||||
|
||||
// SETTINGS_MAX_FRAME_SIZE default
|
||||
// http://http2.github.io/http2-spec/#rfc.section.6.5.2
|
||||
initialMaxFrameSize = 16384
|
||||
|
||||
// NextProtoTLS is the NPN/ALPN protocol negotiated during
|
||||
// HTTP/2's TLS setup.
|
||||
NextProtoTLS = "h2"
|
||||
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
initialHeaderTableSize = 4096
|
||||
|
||||
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
|
||||
|
||||
defaultMaxReadFrameSize = 1 << 20
|
||||
)
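The package comment above names ConfigureServer as the intended entry point. A minimal sketch of that usage follows; the Server type and ConfigureServer itself live in server.go (whose diff is suppressed below), so their exact shapes are assumed here, and the certificate paths are placeholders:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	// ConfigureServer wires HTTP/2 into srv, negotiated over TLS via
	// the "h2" protocol (NextProtoTLS above). Server is the http2
	// server configuration type from server.go.
	http2.ConfigureServer(srv, &http2.Server{})
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
}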
|
||||
|
||||
var (
|
||||
clientPreface = []byte(ClientPreface)
|
||||
)
|
||||
|
||||
type streamState int
|
||||
|
||||
const (
|
||||
stateIdle streamState = iota
|
||||
stateOpen
|
||||
stateHalfClosedLocal
|
||||
stateHalfClosedRemote
|
||||
stateResvLocal
|
||||
stateResvRemote
|
||||
stateClosed
|
||||
)
|
||||
|
||||
var stateName = [...]string{
|
||||
stateIdle: "Idle",
|
||||
stateOpen: "Open",
|
||||
stateHalfClosedLocal: "HalfClosedLocal",
|
||||
stateHalfClosedRemote: "HalfClosedRemote",
|
||||
stateResvLocal: "ResvLocal",
|
||||
stateResvRemote: "ResvRemote",
|
||||
stateClosed: "Closed",
|
||||
}
|
||||
|
||||
func (st streamState) String() string {
|
||||
return stateName[st]
|
||||
}
|
||||
|
||||
// Setting is a setting parameter: which setting it is, and its value.
|
||||
type Setting struct {
|
||||
// ID is which setting is being set.
|
||||
// See http://http2.github.io/http2-spec/#SettingValues
|
||||
ID SettingID
|
||||
|
||||
// Val is the value.
|
||||
Val uint32
|
||||
}
|
||||
|
||||
func (s Setting) String() string {
|
||||
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
|
||||
}
|
||||
|
||||
// Valid reports whether the setting is valid.
|
||||
func (s Setting) Valid() error {
|
||||
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
|
||||
switch s.ID {
|
||||
case SettingEnablePush:
|
||||
if s.Val != 1 && s.Val != 0 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
case SettingInitialWindowSize:
|
||||
if s.Val > 1<<31-1 {
|
||||
return ConnectionError(ErrCodeFlowControl)
|
||||
}
|
||||
case SettingMaxFrameSize:
|
||||
if s.Val < 16384 || s.Val > 1<<24-1 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A SettingID is an HTTP/2 setting as defined in
|
||||
// http://http2.github.io/http2-spec/#iana-settings
|
||||
type SettingID uint16
|
||||
|
||||
const (
|
||||
SettingHeaderTableSize SettingID = 0x1
|
||||
SettingEnablePush SettingID = 0x2
|
||||
SettingMaxConcurrentStreams SettingID = 0x3
|
||||
SettingInitialWindowSize SettingID = 0x4
|
||||
SettingMaxFrameSize SettingID = 0x5
|
||||
SettingMaxHeaderListSize SettingID = 0x6
|
||||
)
|
||||
|
||||
var settingName = map[SettingID]string{
|
||||
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
|
||||
SettingEnablePush: "ENABLE_PUSH",
|
||||
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
|
||||
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
|
||||
SettingMaxFrameSize: "MAX_FRAME_SIZE",
|
||||
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
|
||||
}
|
||||
|
||||
func (s SettingID) String() string {
|
||||
if v, ok := settingName[s]; ok {
|
||||
return v
|
||||
}
|
||||
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
|
||||
}
|
||||
|
||||
func validHeader(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
// "Just as in HTTP/1.x, header field names are
|
||||
// strings of ASCII characters that are compared in a
|
||||
// case-insensitive fashion. However, header field
|
||||
// names MUST be converted to lowercase prior to their
|
||||
// encoding in HTTP/2. "
|
||||
if r >= 127 || ('A' <= r && r <= 'Z') {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
|
||||
|
||||
func init() {
|
||||
for i := 100; i <= 999; i++ {
|
||||
if v := http.StatusText(i); v != "" {
|
||||
httpCodeStringCommon[i] = strconv.Itoa(i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func httpCodeString(code int) string {
|
||||
if s, ok := httpCodeStringCommon[code]; ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(code)
|
||||
}
|
||||
|
||||
// from pkg io
|
||||
type stringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// A gate lets two goroutines coordinate their activities.
|
||||
type gate chan struct{}
|
||||
|
||||
func (g gate) Done() { g <- struct{}{} }
|
||||
func (g gate) Wait() { <-g }
|
||||
|
||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
||||
type closeWaiter chan struct{}
|
||||
|
||||
// Init makes a closeWaiter usable.
|
||||
// It exists so that a closeWaiter value can be placed inside a
|
||||
// larger struct and have the Mutex and Cond's memory in the same
|
||||
// allocation.
|
||||
func (cw *closeWaiter) Init() {
|
||||
*cw = make(chan struct{})
|
||||
}
|
||||
|
||||
// Close marks the closeWaiter as closed and unblocks any waiters.
|
||||
func (cw closeWaiter) Close() {
|
||||
close(cw)
|
||||
}
|
||||
|
||||
// Wait waits for the closeWaiter to become closed.
|
||||
func (cw closeWaiter) Wait() {
|
||||
<-cw
|
||||
}
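closeWaiter is internal, but the pattern is worth a tiny self-contained sketch (the type is re-declared locally so the snippet compiles on its own):

package main

import "fmt"

// closeWaiter mirrors the internal type above: a one-shot open-to-closed latch.
type closeWaiter chan struct{}

func (cw *closeWaiter) Init() { *cw = make(chan struct{}) }
func (cw closeWaiter) Close() { close(cw) }
func (cw closeWaiter) Wait()  { <-cw }

func main() {
	var done closeWaiter
	done.Init() // in the real code this happens when the enclosing struct is set up
	go func() {
		fmt.Println("worker finished")
		done.Close()
	}()
	done.Wait() // blocks until Close; later Waits return immediately
}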
|
||||
|
||||
// bufferedWriter is a buffered writer that writes to w.
|
||||
// Its buffered writer is lazily allocated as needed, to minimize
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
w io.Writer // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
}
|
||||
|
||||
func newBufferedWriter(w io.Writer) *bufferedWriter {
|
||||
return &bufferedWriter{w: w}
|
||||
}
|
||||
|
||||
var bufWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
// TODO: pick something better? this is a bit under
|
||||
// (3 x typical 1500 byte MTU) at least.
|
||||
return bufio.NewWriterSize(nil, 4<<10)
|
||||
},
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
|
||||
if w.bw == nil {
|
||||
bw := bufWriterPool.Get().(*bufio.Writer)
|
||||
bw.Reset(w.w)
|
||||
w.bw = bw
|
||||
}
|
||||
return w.bw.Write(p)
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Flush() error {
|
||||
bw := w.bw
|
||||
if bw == nil {
|
||||
return nil
|
||||
}
|
||||
err := bw.Flush()
|
||||
bw.Reset(nil)
|
||||
bufWriterPool.Put(bw)
|
||||
w.bw = nil
|
||||
return err
|
||||
}
|
43
vendor/src/golang.org/x/net/http2/pipe.go
vendored
Normal file
@@ -0,0 +1,43 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type pipe struct {
|
||||
b buffer
|
||||
c sync.Cond
|
||||
m sync.Mutex
|
||||
}
|
||||
|
||||
// Read waits until data is available and copies bytes
|
||||
// from the buffer into p.
|
||||
func (r *pipe) Read(p []byte) (n int, err error) {
|
||||
r.c.L.Lock()
|
||||
defer r.c.L.Unlock()
|
||||
for r.b.Len() == 0 && !r.b.closed {
|
||||
r.c.Wait()
|
||||
}
|
||||
return r.b.Read(p)
|
||||
}
|
||||
|
||||
// Write copies bytes from p into the buffer and wakes a reader.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (w *pipe) Write(p []byte) (n int, err error) {
|
||||
w.c.L.Lock()
|
||||
defer w.c.L.Unlock()
|
||||
defer w.c.Signal()
|
||||
return w.b.Write(p)
|
||||
}
|
||||
|
||||
func (c *pipe) Close(err error) {
|
||||
c.c.L.Lock()
|
||||
defer c.c.L.Unlock()
|
||||
defer c.c.Signal()
|
||||
c.b.Close(err)
|
||||
}
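The pipe above parks readers on a sync.Cond until data arrives or the pipe is closed. An analogous self-contained sketch of that pattern, using a plain byte slice in place of the internal buffer type:

package main

import (
	"fmt"
	"sync"
)

// condPipe is an illustrative re-implementation of the read/wake pattern above.
type condPipe struct {
	mu     sync.Mutex
	cond   *sync.Cond
	data   []byte
	closed bool
}

func newCondPipe() *condPipe {
	p := &condPipe{}
	p.cond = sync.NewCond(&p.mu)
	return p
}

func (p *condPipe) Write(b []byte) {
	p.mu.Lock()
	p.data = append(p.data, b...)
	p.mu.Unlock()
	p.cond.Signal() // wake a blocked reader, as pipe.Write does
}

func (p *condPipe) Read(b []byte) int {
	p.mu.Lock()
	defer p.mu.Unlock()
	for len(p.data) == 0 && !p.closed {
		p.cond.Wait() // same wait loop as pipe.Read above
	}
	n := copy(b, p.data)
	p.data = p.data[n:]
	return n
}

func main() {
	p := newCondPipe()
	go p.Write([]byte("hello"))
	buf := make([]byte, 8)
	n := p.Read(buf)
	fmt.Println(string(buf[:n]))
}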
|
1780
vendor/src/golang.org/x/net/http2/server.go
vendored
Normal file
File diff suppressed because it is too large
553
vendor/src/golang.org/x/net/http2/transport.go
vendored
Normal file
@@ -0,0 +1,553 @@
|
|||
// Copyright 2015 The Go Authors.
|
||||
// See https://go.googlesource.com/go/+/master/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://go.googlesource.com/go/+/master/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
type Transport struct {
|
||||
Fallback http.RoundTripper
|
||||
|
||||
// TODO: remove this and make more general with a TLS dial hook, like http
|
||||
InsecureTLSDial bool
|
||||
|
||||
connMu sync.Mutex
|
||||
conns map[string][]*clientConn // key is host:port
|
||||
}
|
||||
|
||||
type clientConn struct {
|
||||
t *Transport
|
||||
tconn *tls.Conn
|
||||
tlsState *tls.ConnectionState
|
||||
connKey []string // key(s) this connection is cached in, in t.conns
|
||||
|
||||
readerDone chan struct{} // closed on error
|
||||
readerErr error // set before readerDone is closed
|
||||
hdec *hpack.Decoder
|
||||
nextRes *http.Response
|
||||
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
|
||||
streams map[uint32]*clientStream
|
||||
nextStreamID uint32
|
||||
bw *bufio.Writer
|
||||
werr error // first write error that has occurred
|
||||
br *bufio.Reader
|
||||
fr *Framer
|
||||
// Settings from peer:
|
||||
maxFrameSize uint32
|
||||
maxConcurrentStreams uint32
|
||||
initialWindowSize uint32
|
||||
hbuf bytes.Buffer // HPACK encoder writes into this
|
||||
henc *hpack.Encoder
|
||||
}
|
||||
|
||||
type clientStream struct {
|
||||
ID uint32
|
||||
resc chan resAndError
|
||||
pw *io.PipeWriter
|
||||
pr *io.PipeReader
|
||||
}
|
||||
|
||||
type stickyErrWriter struct {
|
||||
w io.Writer
|
||||
err *error
|
||||
}
|
||||
|
||||
func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
|
||||
if *sew.err != nil {
|
||||
return 0, *sew.err
|
||||
}
|
||||
n, err = sew.w.Write(p)
|
||||
*sew.err = err
|
||||
return
|
||||
}
|
||||
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
if req.URL.Scheme != "https" {
|
||||
if t.Fallback == nil {
|
||||
return nil, errors.New("http2: unsupported scheme and no Fallback")
|
||||
}
|
||||
return t.Fallback.RoundTrip(req)
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(req.URL.Host)
|
||||
if err != nil {
|
||||
host = req.URL.Host
|
||||
port = "443"
|
||||
}
|
||||
|
||||
for {
|
||||
cc, err := t.getClientConn(host, port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res, err := cc.roundTrip(req)
|
||||
if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)?
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
}
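A sketch of driving this Transport from a plain net/http client, relying on the Fallback path that RoundTrip above takes for non-https URLs (the URL is a placeholder):

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	c := &http.Client{
		Transport: &http2.Transport{
			Fallback: http.DefaultTransport, // used when the scheme isn't https
		},
	}
	resp, err := c.Get("https://example.com/") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto, resp.Status)
}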
|
||||
|
||||
// CloseIdleConnections closes any connections which were established
|
||||
// by earlier requests but are now sitting idle.
|
||||
// It does not interrupt any connections currently in use.
|
||||
func (t *Transport) CloseIdleConnections() {
|
||||
t.connMu.Lock()
|
||||
defer t.connMu.Unlock()
|
||||
for _, vv := range t.conns {
|
||||
for _, cc := range vv {
|
||||
cc.closeIfIdle()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var errClientConnClosed = errors.New("http2: client conn is closed")
|
||||
|
||||
func shouldRetryRequest(err error) bool {
|
||||
// TODO: or GOAWAY graceful shutdown stuff
|
||||
return err == errClientConnClosed
|
||||
}
|
||||
|
||||
func (t *Transport) removeClientConn(cc *clientConn) {
|
||||
t.connMu.Lock()
|
||||
defer t.connMu.Unlock()
|
||||
for _, key := range cc.connKey {
|
||||
vv, ok := t.conns[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
newList := filterOutClientConn(vv, cc)
|
||||
if len(newList) > 0 {
|
||||
t.conns[key] = newList
|
||||
} else {
|
||||
delete(t.conns, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn {
|
||||
out := in[:0]
|
||||
for _, v := range in {
|
||||
if v != exclude {
|
||||
out = append(out, v)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (t *Transport) getClientConn(host, port string) (*clientConn, error) {
|
||||
t.connMu.Lock()
|
||||
defer t.connMu.Unlock()
|
||||
|
||||
key := net.JoinHostPort(host, port)
|
||||
|
||||
for _, cc := range t.conns[key] {
|
||||
if cc.canTakeNewRequest() {
|
||||
return cc, nil
|
||||
}
|
||||
}
|
||||
if t.conns == nil {
|
||||
t.conns = make(map[string][]*clientConn)
|
||||
}
|
||||
cc, err := t.newClientConn(host, port, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.conns[key] = append(t.conns[key], cc)
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) {
|
||||
cfg := &tls.Config{
|
||||
ServerName: host,
|
||||
NextProtos: []string{NextProtoTLS},
|
||||
InsecureSkipVerify: t.InsecureTLSDial,
|
||||
}
|
||||
tconn, err := tls.Dial("tcp", net.JoinHostPort(host, port), cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := tconn.Handshake(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !t.InsecureTLSDial {
|
||||
if err := tconn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
state := tconn.ConnectionState()
|
||||
if p := state.NegotiatedProtocol; p != NextProtoTLS {
|
||||
// TODO(bradfitz): fall back to Fallback
|
||||
return nil, fmt.Errorf("bad protocol: %v", p)
|
||||
}
|
||||
if !state.NegotiatedProtocolIsMutual {
|
||||
return nil, errors.New("could not negotiate protocol mutually")
|
||||
}
|
||||
if _, err := tconn.Write(clientPreface); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cc := &clientConn{
|
||||
t: t,
|
||||
tconn: tconn,
|
||||
connKey: []string{key}, // TODO: cert's validated hostnames too
|
||||
tlsState: &state,
|
||||
readerDone: make(chan struct{}),
|
||||
nextStreamID: 1,
|
||||
maxFrameSize: 16 << 10, // spec default
|
||||
initialWindowSize: 65535, // spec default
|
||||
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
|
||||
streams: make(map[uint32]*clientStream),
|
||||
}
|
||||
cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr})
|
||||
cc.br = bufio.NewReader(tconn)
|
||||
cc.fr = NewFramer(cc.bw, cc.br)
|
||||
cc.henc = hpack.NewEncoder(&cc.hbuf)
|
||||
|
||||
cc.fr.WriteSettings()
|
||||
// TODO: re-send more conn-level flow control tokens when server uses all these.
|
||||
cc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs?
|
||||
cc.bw.Flush()
|
||||
if cc.werr != nil {
|
||||
return nil, cc.werr
|
||||
}
|
||||
|
||||
// Read the obligatory SETTINGS frame
|
||||
f, err := cc.fr.ReadFrame()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sf, ok := f.(*SettingsFrame)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected settings frame, got: %T", f)
|
||||
}
|
||||
cc.fr.WriteSettingsAck()
|
||||
cc.bw.Flush()
|
||||
|
||||
sf.ForeachSetting(func(s Setting) error {
|
||||
switch s.ID {
|
||||
case SettingMaxFrameSize:
|
||||
cc.maxFrameSize = s.Val
|
||||
case SettingMaxConcurrentStreams:
|
||||
cc.maxConcurrentStreams = s.Val
|
||||
case SettingInitialWindowSize:
|
||||
cc.initialWindowSize = s.Val
|
||||
default:
|
||||
// TODO(bradfitz): handle more
|
||||
log.Printf("Unhandled Setting: %v", s)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
// TODO: figure out henc size
|
||||
cc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField)
|
||||
|
||||
go cc.readLoop()
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
func (cc *clientConn) setGoAway(f *GoAwayFrame) {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
cc.goAway = f
|
||||
}
|
||||
|
||||
func (cc *clientConn) canTakeNewRequest() bool {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
return cc.goAway == nil &&
|
||||
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
|
||||
cc.nextStreamID < 2147483647
|
||||
}
|
||||
|
||||
func (cc *clientConn) closeIfIdle() {
|
||||
cc.mu.Lock()
|
||||
if len(cc.streams) > 0 {
|
||||
cc.mu.Unlock()
|
||||
return
|
||||
}
|
||||
cc.closed = true
|
||||
// TODO: do clients send GOAWAY too? maybe? Just Close:
|
||||
cc.mu.Unlock()
|
||||
|
||||
cc.tconn.Close()
|
||||
}
|
||||
|
||||
func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) {
|
||||
cc.mu.Lock()
|
||||
|
||||
if cc.closed {
|
||||
cc.mu.Unlock()
|
||||
return nil, errClientConnClosed
|
||||
}
|
||||
|
||||
cs := cc.newStream()
|
||||
hasBody := false // TODO
|
||||
|
||||
// we send: HEADERS[+CONTINUATION] + (DATA?)
|
||||
hdrs := cc.encodeHeaders(req)
|
||||
first := true
|
||||
for len(hdrs) > 0 {
|
||||
chunk := hdrs
|
||||
if len(chunk) > int(cc.maxFrameSize) {
|
||||
chunk = chunk[:cc.maxFrameSize]
|
||||
}
|
||||
hdrs = hdrs[len(chunk):]
|
||||
endHeaders := len(hdrs) == 0
|
||||
if first {
|
||||
cc.fr.WriteHeaders(HeadersFrameParam{
|
||||
StreamID: cs.ID,
|
||||
BlockFragment: chunk,
|
||||
EndStream: !hasBody,
|
||||
EndHeaders: endHeaders,
|
||||
})
|
||||
first = false
|
||||
} else {
|
||||
cc.fr.WriteContinuation(cs.ID, endHeaders, chunk)
|
||||
}
|
||||
}
|
||||
cc.bw.Flush()
|
||||
werr := cc.werr
|
||||
cc.mu.Unlock()
|
||||
|
||||
if hasBody {
|
||||
// TODO: write data. and it should probably be interleaved:
|
||||
// go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... etc
|
||||
}
|
||||
|
||||
if werr != nil {
|
||||
return nil, werr
|
||||
}
|
||||
|
||||
re := <-cs.resc
|
||||
if re.err != nil {
|
||||
return nil, re.err
|
||||
}
|
||||
res := re.res
|
||||
res.Request = req
|
||||
res.TLS = cc.tlsState
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// requires cc.mu be held.
|
||||
func (cc *clientConn) encodeHeaders(req *http.Request) []byte {
|
||||
cc.hbuf.Reset()
|
||||
|
||||
// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
|
||||
host := req.Host
|
||||
if host == "" {
|
||||
host = req.URL.Host
|
||||
}
|
||||
|
||||
path := req.URL.Path
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
|
||||
cc.writeHeader(":authority", host) // probably not right for all sites
|
||||
cc.writeHeader(":method", req.Method)
|
||||
cc.writeHeader(":path", path)
|
||||
cc.writeHeader(":scheme", "https")
|
||||
|
||||
for k, vv := range req.Header {
|
||||
lowKey := strings.ToLower(k)
|
||||
if lowKey == "host" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
cc.writeHeader(lowKey, v)
|
||||
}
|
||||
}
|
||||
return cc.hbuf.Bytes()
|
||||
}
|
||||
|
||||
func (cc *clientConn) writeHeader(name, value string) {
|
||||
log.Printf("sending %q = %q", name, value)
|
||||
cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
|
||||
}
|
||||
|
||||
type resAndError struct {
|
||||
res *http.Response
|
||||
err error
|
||||
}
|
||||
|
||||
// requires cc.mu be held.
|
||||
func (cc *clientConn) newStream() *clientStream {
|
||||
cs := &clientStream{
|
||||
ID: cc.nextStreamID,
|
||||
resc: make(chan resAndError, 1),
|
||||
}
|
||||
cc.nextStreamID += 2
|
||||
cc.streams[cs.ID] = cs
|
||||
return cs
|
||||
}
|
||||
|
||||
func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
cs := cc.streams[id]
|
||||
if andRemove {
|
||||
delete(cc.streams, id)
|
||||
}
|
||||
return cs
|
||||
}
|
||||
|
||||
// runs in its own goroutine.
|
||||
func (cc *clientConn) readLoop() {
|
||||
defer cc.t.removeClientConn(cc)
|
||||
defer close(cc.readerDone)
|
||||
|
||||
activeRes := map[uint32]*clientStream{} // keyed by streamID
|
||||
// Close any response bodies if the server closes prematurely.
|
||||
// TODO: also do this if we've written the headers but not
|
||||
// gotten a response yet.
|
||||
defer func() {
|
||||
err := cc.readerErr
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
for _, cs := range activeRes {
|
||||
cs.pw.CloseWithError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// continueStreamID is the stream ID we're waiting for
|
||||
// continuation frames for.
|
||||
var continueStreamID uint32
|
||||
|
||||
for {
|
||||
f, err := cc.fr.ReadFrame()
|
||||
if err != nil {
|
||||
cc.readerErr = err
|
||||
return
|
||||
}
|
||||
log.Printf("Transport received %v: %#v", f.Header(), f)
|
||||
|
||||
streamID := f.Header().StreamID
|
||||
|
||||
_, isContinue := f.(*ContinuationFrame)
|
||||
if isContinue {
|
||||
if streamID != continueStreamID {
|
||||
log.Printf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, continueStreamID)
|
||||
cc.readerErr = ConnectionError(ErrCodeProtocol)
|
||||
return
|
||||
}
|
||||
} else if continueStreamID != 0 {
|
||||
// Continue frames need to be adjacent in the stream
|
||||
// and we were in the middle of headers.
|
||||
log.Printf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, continueStreamID)
|
||||
cc.readerErr = ConnectionError(ErrCodeProtocol)
|
||||
return
|
||||
}
|
||||
|
||||
if streamID%2 == 0 {
|
||||
// Ignore streams pushed from the server for now.
|
||||
// These always have an even stream id.
|
||||
continue
|
||||
}
|
||||
streamEnded := false
|
||||
if ff, ok := f.(streamEnder); ok {
|
||||
streamEnded = ff.StreamEnded()
|
||||
}
|
||||
|
||||
cs := cc.streamByID(streamID, streamEnded)
|
||||
if cs == nil {
|
||||
log.Printf("Received frame for untracked stream ID %d", streamID)
|
||||
continue
|
||||
}
|
||||
|
||||
switch f := f.(type) {
|
||||
case *HeadersFrame:
|
||||
cc.nextRes = &http.Response{
|
||||
Proto: "HTTP/2.0",
|
||||
ProtoMajor: 2,
|
||||
Header: make(http.Header),
|
||||
}
|
||||
cs.pr, cs.pw = io.Pipe()
|
||||
cc.hdec.Write(f.HeaderBlockFragment())
|
||||
case *ContinuationFrame:
|
||||
cc.hdec.Write(f.HeaderBlockFragment())
|
||||
case *DataFrame:
|
||||
log.Printf("DATA: %q", f.Data())
|
||||
cs.pw.Write(f.Data())
|
||||
case *GoAwayFrame:
|
||||
cc.t.removeClientConn(cc)
|
||||
if f.ErrCode != 0 {
|
||||
// TODO: deal with GOAWAY more. particularly the error code
|
||||
log.Printf("transport got GOAWAY with error code = %v", f.ErrCode)
|
||||
}
|
||||
cc.setGoAway(f)
|
||||
default:
|
||||
log.Printf("Transport: unhandled response frame type %T", f)
|
||||
}
|
||||
headersEnded := false
|
||||
if he, ok := f.(headersEnder); ok {
|
||||
headersEnded = he.HeadersEnded()
|
||||
if headersEnded {
|
||||
continueStreamID = 0
|
||||
} else {
|
||||
continueStreamID = streamID
|
||||
}
|
||||
}
|
||||
|
||||
if streamEnded {
|
||||
cs.pw.Close()
|
||||
delete(activeRes, streamID)
|
||||
}
|
||||
if headersEnded {
|
||||
if cs == nil {
|
||||
panic("couldn't find stream") // TODO be graceful
|
||||
}
|
||||
// TODO: set the Body to one which notes the
|
||||
// Close and also sends the server a
|
||||
// RST_STREAM
|
||||
cc.nextRes.Body = cs.pr
|
||||
res := cc.nextRes
|
||||
activeRes[streamID] = cs
|
||||
cs.resc <- resAndError{res: res}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *clientConn) onNewHeaderField(f hpack.HeaderField) {
|
||||
// TODO: verify pseudo headers come before non-pseudo headers
|
||||
// TODO: verify the status is set
|
||||
log.Printf("Header field: %+v", f)
|
||||
if f.Name == ":status" {
|
||||
code, err := strconv.Atoi(f.Value)
|
||||
if err != nil {
|
||||
panic("TODO: be graceful")
|
||||
}
|
||||
cc.nextRes.Status = f.Value + " " + http.StatusText(code)
|
||||
cc.nextRes.StatusCode = code
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(f.Name, ":") {
|
||||
// "Endpoints MUST NOT generate pseudo-header fields other than those defined in this document."
|
||||
// TODO: treat as invalid?
|
||||
return
|
||||
}
|
||||
cc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value)
|
||||
}
|
204
vendor/src/golang.org/x/net/http2/write.go
vendored
Normal file
@@ -0,0 +1,204 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
// writeFramer is implemented by any type that is used to write frames.
|
||||
type writeFramer interface {
|
||||
writeFrame(writeContext) error
|
||||
}
|
||||
|
||||
// writeContext is the interface needed by the various frame writer
|
||||
// types below. All the writeFrame methods below are scheduled via the
|
||||
// frame writing scheduler (see writeScheduler in writesched.go).
|
||||
//
|
||||
// This interface is implemented by *serverConn.
|
||||
// TODO: use it from the client code too, once it exists.
|
||||
type writeContext interface {
|
||||
Framer() *Framer
|
||||
Flush() error
|
||||
CloseConn() error
|
||||
// HeaderEncoder returns an HPACK encoder that writes to the
|
||||
// returned buffer.
|
||||
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
|
||||
}
|
||||
|
||||
// endsStream reports whether the given frame writer w will locally
|
||||
// close the stream.
|
||||
func endsStream(w writeFramer) bool {
|
||||
switch v := w.(type) {
|
||||
case *writeData:
|
||||
return v.endStream
|
||||
case *writeResHeaders:
|
||||
return v.endStream
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type flushFrameWriter struct{}
|
||||
|
||||
func (flushFrameWriter) writeFrame(ctx writeContext) error {
|
||||
return ctx.Flush()
|
||||
}
|
||||
|
||||
type writeSettings []Setting
|
||||
|
||||
func (s writeSettings) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettings([]Setting(s)...)
|
||||
}
|
||||
|
||||
type writeGoAway struct {
|
||||
maxStreamID uint32
|
||||
code ErrCode
|
||||
}
|
||||
|
||||
func (p *writeGoAway) writeFrame(ctx writeContext) error {
|
||||
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
|
||||
if p.code != 0 {
|
||||
ctx.Flush() // ignore error: we're hanging up on them anyway
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
ctx.CloseConn()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type writeData struct {
|
||||
streamID uint32
|
||||
p []byte
|
||||
endStream bool
|
||||
}
|
||||
|
||||
func (w *writeData) String() string {
|
||||
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
|
||||
}
|
||||
|
||||
func (w *writeData) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
|
||||
}
|
||||
|
||||
func (se StreamError) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
|
||||
}
|
||||
|
||||
type writePingAck struct{ pf *PingFrame }
|
||||
|
||||
func (w writePingAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WritePing(true, w.pf.Data)
|
||||
}
|
||||
|
||||
type writeSettingsAck struct{}
|
||||
|
||||
func (writeSettingsAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettingsAck()
|
||||
}
|
||||
|
||||
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
|
||||
// for HTTP response headers from a server handler.
|
||||
type writeResHeaders struct {
|
||||
streamID uint32
|
||||
httpResCode int
|
||||
h http.Header // may be nil
|
||||
endStream bool
|
||||
|
||||
contentType string
|
||||
contentLength string
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)})
|
||||
for k, vv := range w.h {
|
||||
k = lowerHeader(k)
|
||||
for _, v := range vv {
|
||||
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
||||
if k == "transfer-encoding" && v != "trailers" {
|
||||
continue
|
||||
}
|
||||
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
}
|
||||
}
|
||||
if w.contentType != "" {
|
||||
enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType})
|
||||
}
|
||||
if w.contentLength != "" {
|
||||
enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength})
|
||||
}
|
||||
|
||||
headerBlock := buf.Bytes()
|
||||
if len(headerBlock) == 0 {
|
||||
panic("unexpected empty hpack")
|
||||
}
|
||||
|
||||
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
|
||||
// that all peers must support (16KB). Later we could care
|
||||
// more and send larger frames if the peer advertised it, but
|
||||
// there's little point. Most headers are small anyway (so we
|
||||
// generally won't have CONTINUATION frames), and extra frames
|
||||
// only waste 9 bytes anyway.
|
||||
const maxFrameSize = 16384
|
||||
|
||||
first := true
|
||||
for len(headerBlock) > 0 {
|
||||
frag := headerBlock
|
||||
if len(frag) > maxFrameSize {
|
||||
frag = frag[:maxFrameSize]
|
||||
}
|
||||
headerBlock = headerBlock[len(frag):]
|
||||
endHeaders := len(headerBlock) == 0
|
||||
var err error
|
||||
if first {
|
||||
first = false
|
||||
err = ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: frag,
|
||||
EndStream: w.endStream,
|
||||
EndHeaders: endHeaders,
|
||||
})
|
||||
} else {
|
||||
err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
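To make the splitting above concrete: with maxFrameSize fixed at 16384, a 40,000 byte header block is written as a HEADERS fragment of 16384 bytes followed by CONTINUATION fragments of 16384 and 7232 bytes, and only the last fragment carries END_HEADERS. The same chunking in isolation (a hypothetical standalone helper):

package main

import "fmt"

// chunkHeaderBlock returns the fragment sizes a header block of n bytes
// would be written in, mirroring the loop in writeResHeaders above.
func chunkHeaderBlock(n, maxFrameSize int) []int {
	var sizes []int
	for n > 0 {
		frag := n
		if frag > maxFrameSize {
			frag = maxFrameSize
		}
		sizes = append(sizes, frag)
		n -= frag
	}
	return sizes
}

func main() {
	fmt.Println(chunkHeaderBlock(40000, 16384)) // [16384 16384 7232]
}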
|
||||
|
||||
type write100ContinueHeadersFrame struct {
|
||||
streamID uint32
|
||||
}
|
||||
|
||||
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
|
||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: buf.Bytes(),
|
||||
EndStream: false,
|
||||
EndHeaders: true,
|
||||
})
|
||||
}
|
||||
|
||||
type writeWindowUpdate struct {
|
||||
streamID uint32 // or 0 for conn-level
|
||||
n uint32
|
||||
}
|
||||
|
||||
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
|
||||
}
|
286
vendor/src/golang.org/x/net/http2/writesched.go
vendored
Normal file
@@ -0,0 +1,286 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import "fmt"
|
||||
|
||||
// frameWriteMsg is a request to write a frame.
|
||||
type frameWriteMsg struct {
|
||||
// write is the interface value that does the writing, once the
|
||||
// writeScheduler (below) has decided to select this frame
|
||||
// to write. The write functions are all defined in write.go.
|
||||
write writeFramer
|
||||
|
||||
stream *stream // used for prioritization. nil for non-stream frames.
|
||||
|
||||
// done, if non-nil, must be a buffered channel with space for
|
||||
// 1 message and is sent the return value from write (or an
|
||||
// earlier error) when the frame has been written.
|
||||
done chan error
|
||||
}
|
||||
|
||||
// for debugging only:
|
||||
func (wm frameWriteMsg) String() string {
|
||||
var streamID uint32
|
||||
if wm.stream != nil {
|
||||
streamID = wm.stream.id
|
||||
}
|
||||
var des string
|
||||
if s, ok := wm.write.(fmt.Stringer); ok {
|
||||
des = s.String()
|
||||
} else {
|
||||
des = fmt.Sprintf("%T", wm.write)
|
||||
}
|
||||
return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
|
||||
}
|
||||
|
||||
// writeScheduler tracks pending frames to write, priorities, and decides
|
||||
// the next one to use. It is not thread-safe.
|
||||
type writeScheduler struct {
|
||||
// zero are frames not associated with a specific stream.
|
||||
// They're sent before any stream-specific frames.
|
||||
zero writeQueue
|
||||
|
||||
// maxFrameSize is the maximum size of a DATA frame
|
||||
// we'll write. Must be non-zero and between 16K-16M.
|
||||
maxFrameSize uint32
|
||||
|
||||
// sq contains the stream-specific queues, keyed by stream ID.
|
||||
// when a stream is idle, it's deleted from the map.
|
||||
sq map[uint32]*writeQueue
|
||||
|
||||
// canSend is a slice of memory that's reused between frame
|
||||
// scheduling decisions to hold the list of writeQueues (from sq)
|
||||
// which have enough flow control data to send. After canSend is
|
||||
// built, the best is selected.
|
||||
canSend []*writeQueue
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool []*writeQueue
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
|
||||
if len(q.s) != 0 {
|
||||
panic("queue must be empty")
|
||||
}
|
||||
ws.queuePool = append(ws.queuePool, q)
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) getEmptyQueue() *writeQueue {
|
||||
ln := len(ws.queuePool)
|
||||
if ln == 0 {
|
||||
return new(writeQueue)
|
||||
}
|
||||
q := ws.queuePool[ln-1]
|
||||
ws.queuePool = ws.queuePool[:ln-1]
|
||||
return q
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
|
||||
|
||||
func (ws *writeScheduler) add(wm frameWriteMsg) {
|
||||
st := wm.stream
|
||||
if st == nil {
|
||||
ws.zero.push(wm)
|
||||
} else {
|
||||
ws.streamQueue(st.id).push(wm)
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
|
||||
if q, ok := ws.sq[streamID]; ok {
|
||||
return q
|
||||
}
|
||||
if ws.sq == nil {
|
||||
ws.sq = make(map[uint32]*writeQueue)
|
||||
}
|
||||
q := ws.getEmptyQueue()
|
||||
ws.sq[streamID] = q
|
||||
return q
|
||||
}
|
||||
|
||||
// take returns the most important frame to write and removes it from the scheduler.
|
||||
// It is illegal to call this if the scheduler is empty or if there are no connection-level
|
||||
// flow control bytes available.
|
||||
func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
|
||||
if ws.maxFrameSize == 0 {
|
||||
panic("internal error: ws.maxFrameSize not initialized or invalid")
|
||||
}
|
||||
|
||||
// If there are any frames not associated with streams, prefer those first.
|
||||
// These are usually SETTINGS, etc.
|
||||
if !ws.zero.empty() {
|
||||
return ws.zero.shift(), true
|
||||
}
|
||||
if len(ws.sq) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Next, prioritize frames on streams that aren't DATA frames (no cost).
|
||||
for id, q := range ws.sq {
|
||||
if q.firstIsNoCost() {
|
||||
return ws.takeFrom(id, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Now, all that remains are DATA frames with non-zero bytes to
|
||||
// send. So pick the best one.
|
||||
if len(ws.canSend) != 0 {
|
||||
panic("should be empty")
|
||||
}
|
||||
for _, q := range ws.sq {
|
||||
if n := ws.streamWritableBytes(q); n > 0 {
|
||||
ws.canSend = append(ws.canSend, q)
|
||||
}
|
||||
}
|
||||
if len(ws.canSend) == 0 {
|
||||
return
|
||||
}
|
||||
defer ws.zeroCanSend()
|
||||
|
||||
// TODO: find the best queue
|
||||
q := ws.canSend[0]
|
||||
|
||||
return ws.takeFrom(q.streamID(), q)
|
||||
}
|
||||
|
||||
// zeroCanSend is deferred from take.
|
||||
func (ws *writeScheduler) zeroCanSend() {
|
||||
for i := range ws.canSend {
|
||||
ws.canSend[i] = nil
|
||||
}
|
||||
ws.canSend = ws.canSend[:0]
|
||||
}
|
||||
|
||||
// streamWritableBytes returns the number of DATA bytes we could write
|
||||
// from the given queue's stream, if this stream/queue were
|
||||
// selected. It is an error to call this if q's head isn't a
|
||||
// *writeData.
|
||||
func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
|
||||
wm := q.head()
|
||||
ret := wm.stream.flow.available() // max we can write
|
||||
if ret == 0 {
|
||||
return 0
|
||||
}
|
||||
if int32(ws.maxFrameSize) < ret {
|
||||
ret = int32(ws.maxFrameSize)
|
||||
}
|
||||
if ret == 0 {
|
||||
panic("internal error: ws.maxFrameSize not initialized or invalid")
|
||||
}
|
||||
wd := wm.write.(*writeData)
|
||||
if len(wd.p) < int(ret) {
|
||||
ret = int32(len(wd.p))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
|
||||
wm = q.head()
|
||||
// If the first item in this queue costs flow control tokens
|
||||
// and we don't have enough, write as much as we can.
|
||||
if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
|
||||
allowed := wm.stream.flow.available() // max we can write
|
||||
if allowed == 0 {
|
||||
// No quota available. Caller can try the next stream.
|
||||
return frameWriteMsg{}, false
|
||||
}
|
||||
if int32(ws.maxFrameSize) < allowed {
|
||||
allowed = int32(ws.maxFrameSize)
|
||||
}
|
||||
// TODO: further restrict the allowed size, because even if
|
||||
// the peer says it's okay to write 16MB data frames, we might
|
||||
// want to write smaller ones to properly weight competing
|
||||
// streams' priorities.
|
||||
|
||||
if len(wd.p) > int(allowed) {
|
||||
wm.stream.flow.take(allowed)
|
||||
chunk := wd.p[:allowed]
|
||||
wd.p = wd.p[allowed:]
|
||||
// Make up a new write message of a valid size, rather
|
||||
// than shifting one off the queue.
|
||||
return frameWriteMsg{
|
||||
stream: wm.stream,
|
||||
write: &writeData{
|
||||
streamID: wd.streamID,
|
||||
p: chunk,
|
||||
// even if the original had endStream set, there
|
||||
// are bytes remaining because len(wd.p) > allowed,
|
||||
// so we know endStream is false:
|
||||
endStream: false,
|
||||
},
|
||||
// our caller is blocking on the final DATA frame, not
|
||||
// these intermediates, so no need to wait:
|
||||
done: nil,
|
||||
}, true
|
||||
}
|
||||
wm.stream.flow.take(int32(len(wd.p)))
|
||||
}
|
||||
|
||||
q.shift()
|
||||
if q.empty() {
|
||||
ws.putEmptyQueue(q)
|
||||
delete(ws.sq, id)
|
||||
}
|
||||
return wm, true
|
||||
}
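The chunking in takeFrom caps each DATA frame at the smaller of the stream's remaining flow-control window and the negotiated maximum frame size. A minimal standalone sketch of that rule follows (illustrative only; splitChunk is a made-up helper, not part of the vendored package):

package main

import "fmt"

// splitChunk mirrors the rule in takeFrom above: send at most
// min(flowAvailable, maxFrameSize) bytes now and keep the rest queued.
// Only the final chunk of the payload may carry endStream.
func splitChunk(p []byte, flowAvailable, maxFrameSize int32) (chunk, rest []byte, last bool) {
	allowed := flowAvailable
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}
	if int32(len(p)) <= allowed {
		return p, nil, true
	}
	return p[:allowed], p[allowed:], false
}

func main() {
	payload := make([]byte, 40000)
	chunk, rest, last := splitChunk(payload, 65535, 16384)
	fmt.Println(len(chunk), len(rest), last) // 16384 23616 false
}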
|
||||
|
||||
func (ws *writeScheduler) forgetStream(id uint32) {
|
||||
q, ok := ws.sq[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(ws.sq, id)
|
||||
|
||||
// But keep it for others later.
|
||||
for i := range q.s {
|
||||
q.s[i] = frameWriteMsg{}
|
||||
}
|
||||
q.s = q.s[:0]
|
||||
ws.putEmptyQueue(q)
|
||||
}
|
||||
|
||||
type writeQueue struct {
|
||||
s []frameWriteMsg
|
||||
}
|
||||
|
||||
// streamID returns the stream ID for a non-empty stream-specific queue.
|
||||
func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
|
||||
|
||||
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
|
||||
|
||||
func (q *writeQueue) push(wm frameWriteMsg) {
|
||||
q.s = append(q.s, wm)
|
||||
}
|
||||
|
||||
// head returns the next item that would be removed by shift.
|
||||
func (q *writeQueue) head() frameWriteMsg {
|
||||
if len(q.s) == 0 {
|
||||
panic("invalid use of queue")
|
||||
}
|
||||
return q.s[0]
|
||||
}
|
||||
|
||||
func (q *writeQueue) shift() frameWriteMsg {
|
||||
if len(q.s) == 0 {
|
||||
panic("invalid use of queue")
|
||||
}
|
||||
wm := q.s[0]
|
||||
// TODO: less copy-happy queue.
|
||||
copy(q.s, q.s[1:])
|
||||
q.s[len(q.s)-1] = frameWriteMsg{}
|
||||
q.s = q.s[:len(q.s)-1]
|
||||
return wm
|
||||
}
|
||||
|
||||
func (q *writeQueue) firstIsNoCost() bool {
|
||||
if df, ok := q.s[0].write.(*writeData); ok {
|
||||
return len(df.p) == 0
|
||||
}
|
||||
return true
|
||||
}
|
525
vendor/src/golang.org/x/net/internal/timeseries/timeseries.go
vendored
Normal file
|
@@ -0,0 +1,525 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package timeseries implements a time series structure for stats collection.
|
||||
package timeseries // import "golang.org/x/net/internal/timeseries"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
timeSeriesNumBuckets = 64
|
||||
minuteHourSeriesNumBuckets = 60
|
||||
)
|
||||
|
||||
var timeSeriesResolutions = []time.Duration{
|
||||
1 * time.Second,
|
||||
10 * time.Second,
|
||||
1 * time.Minute,
|
||||
10 * time.Minute,
|
||||
1 * time.Hour,
|
||||
6 * time.Hour,
|
||||
24 * time.Hour, // 1 day
|
||||
7 * 24 * time.Hour, // 1 week
|
||||
4 * 7 * 24 * time.Hour, // 4 weeks
|
||||
16 * 7 * 24 * time.Hour, // 16 weeks
|
||||
}
|
||||
|
||||
var minuteHourSeriesResolutions = []time.Duration{
|
||||
1 * time.Second,
|
||||
1 * time.Minute,
|
||||
}
|
||||
|
||||
// An Observable is a kind of data that can be aggregated in a time series.
|
||||
type Observable interface {
|
||||
Multiply(ratio float64) // Multiplies the data in self by a given ratio
|
||||
Add(other Observable) // Adds the data from a different observation to self
|
||||
Clear() // Clears the observation so it can be reused.
|
||||
CopyFrom(other Observable) // Copies the contents of a given observation to self
|
||||
}
|
||||
|
||||
// Float attaches the methods of Observable to a float64.
|
||||
type Float float64
|
||||
|
||||
// NewFloat returns a Float.
|
||||
func NewFloat() Observable {
|
||||
f := Float(0)
|
||||
return &f
|
||||
}
|
||||
|
||||
// String returns the float as a string.
|
||||
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
|
||||
|
||||
// Value returns the float's value.
|
||||
func (f *Float) Value() float64 { return float64(*f) }
|
||||
|
||||
func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
|
||||
|
||||
func (f *Float) Add(other Observable) {
|
||||
o := other.(*Float)
|
||||
*f += *o
|
||||
}
|
||||
|
||||
func (f *Float) Clear() { *f = 0 }
|
||||
|
||||
func (f *Float) CopyFrom(other Observable) {
|
||||
o := other.(*Float)
|
||||
*f = *o
|
||||
}
|
||||
|
||||
// A Clock tells the current time.
|
||||
type Clock interface {
|
||||
Time() time.Time
|
||||
}
|
||||
|
||||
type defaultClock int
|
||||
|
||||
var defaultClockInstance defaultClock
|
||||
|
||||
func (defaultClock) Time() time.Time { return time.Now() }
|
||||
|
||||
// Information kept per level. Each level consists of a circular list of
|
||||
// observations. The start of the level may be derived from end and the
|
||||
// len(buckets) * sizeInMillis.
|
||||
type tsLevel struct {
|
||||
oldest int // index to oldest bucketed Observable
|
||||
newest int // index to newest bucketed Observable
|
||||
end time.Time // end timestamp for this level
|
||||
size time.Duration // duration of the bucketed Observable
|
||||
buckets []Observable // collections of observations
|
||||
provider func() Observable // used for creating new Observable
|
||||
}
|
||||
|
||||
func (l *tsLevel) Clear() {
|
||||
l.oldest = 0
|
||||
l.newest = len(l.buckets) - 1
|
||||
l.end = time.Time{}
|
||||
for i := range l.buckets {
|
||||
if l.buckets[i] != nil {
|
||||
l.buckets[i].Clear()
|
||||
l.buckets[i] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
|
||||
l.size = size
|
||||
l.provider = f
|
||||
l.buckets = make([]Observable, numBuckets)
|
||||
}
|
||||
|
||||
// Keeps a sequence of levels. Each level is responsible for storing data at
|
||||
// a given resolution. For example, the first level stores data at a one
|
||||
// minute resolution while the second level stores data at a one hour
|
||||
// resolution.
|
||||
|
||||
// Each level is represented by a sequence of buckets. Each bucket spans an
|
||||
// interval equal to the resolution of the level. New observations are added
|
||||
// to the last bucket.
|
||||
type timeSeries struct {
|
||||
provider func() Observable // make more Observable
|
||||
numBuckets int // number of buckets in each level
|
||||
levels []*tsLevel // levels of bucketed Observable
|
||||
lastAdd time.Time // time of last Observable tracked
|
||||
total Observable // convenient aggregation of all Observable
|
||||
clock Clock // Clock for getting current time
|
||||
pending Observable // observations not yet bucketed
|
||||
pendingTime time.Time // what time are we keeping in pending
|
||||
dirty bool // if there are pending observations
|
||||
}
|
||||
|
||||
// init initializes the time series levels according to the supplied criteria.
|
||||
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
|
||||
ts.provider = f
|
||||
ts.numBuckets = numBuckets
|
||||
ts.clock = clock
|
||||
ts.levels = make([]*tsLevel, len(resolutions))
|
||||
|
||||
for i := range resolutions {
|
||||
if i > 0 && resolutions[i-1] >= resolutions[i] {
|
||||
log.Print("timeseries: resolutions must be monotonically increasing")
|
||||
break
|
||||
}
|
||||
newLevel := new(tsLevel)
|
||||
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
|
||||
ts.levels[i] = newLevel
|
||||
}
|
||||
|
||||
ts.Clear()
|
||||
}
|
||||
|
||||
// Clear removes all observations from the time series.
|
||||
func (ts *timeSeries) Clear() {
|
||||
ts.lastAdd = time.Time{}
|
||||
ts.total = ts.resetObservation(ts.total)
|
||||
ts.pending = ts.resetObservation(ts.pending)
|
||||
ts.pendingTime = time.Time{}
|
||||
ts.dirty = false
|
||||
|
||||
for i := range ts.levels {
|
||||
ts.levels[i].Clear()
|
||||
}
|
||||
}
|
||||
|
||||
// Add records an observation at the current time.
|
||||
func (ts *timeSeries) Add(observation Observable) {
|
||||
ts.AddWithTime(observation, ts.clock.Time())
|
||||
}
|
||||
|
||||
// AddWithTime records an observation at the specified time.
|
||||
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
|
||||
|
||||
smallBucketDuration := ts.levels[0].size
|
||||
|
||||
if t.After(ts.lastAdd) {
|
||||
ts.lastAdd = t
|
||||
}
|
||||
|
||||
if t.After(ts.pendingTime) {
|
||||
ts.advance(t)
|
||||
ts.mergePendingUpdates()
|
||||
ts.pendingTime = ts.levels[0].end
|
||||
ts.pending.CopyFrom(observation)
|
||||
ts.dirty = true
|
||||
} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
|
||||
// The observation is close enough to go into the pending bucket.
|
||||
// This compensates for clock skewing and small scheduling delays
|
||||
// by letting the update stay in the fast path.
|
||||
ts.pending.Add(observation)
|
||||
ts.dirty = true
|
||||
} else {
|
||||
ts.mergeValue(observation, t)
|
||||
}
|
||||
}
|
||||
|
||||
// mergeValue inserts the observation at the specified time in the past into all levels.
|
||||
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
|
||||
for _, level := range ts.levels {
|
||||
index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
|
||||
if 0 <= index && index < ts.numBuckets {
|
||||
bucketNumber := (level.oldest + index) % ts.numBuckets
|
||||
if level.buckets[bucketNumber] == nil {
|
||||
level.buckets[bucketNumber] = level.provider()
|
||||
}
|
||||
level.buckets[bucketNumber].Add(observation)
|
||||
}
|
||||
}
|
||||
ts.total.Add(observation)
|
||||
}
|
||||
|
||||
// mergePendingUpdates applies the pending updates into all levels.
|
||||
func (ts *timeSeries) mergePendingUpdates() {
|
||||
if ts.dirty {
|
||||
ts.mergeValue(ts.pending, ts.pendingTime)
|
||||
ts.pending = ts.resetObservation(ts.pending)
|
||||
ts.dirty = false
|
||||
}
|
||||
}
|
||||
|
||||
// advance cycles the buckets at each level until the latest bucket in
|
||||
// each level can hold the time specified.
|
||||
func (ts *timeSeries) advance(t time.Time) {
|
||||
if !t.After(ts.levels[0].end) {
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(ts.levels); i++ {
|
||||
level := ts.levels[i]
|
||||
if !level.end.Before(t) {
|
||||
break
|
||||
}
|
||||
|
||||
// If the time is sufficiently far, just clear the level and advance
|
||||
// directly.
|
||||
if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
|
||||
for _, b := range level.buckets {
|
||||
ts.resetObservation(b)
|
||||
}
|
||||
level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
|
||||
}
|
||||
|
||||
for t.After(level.end) {
|
||||
level.end = level.end.Add(level.size)
|
||||
level.newest = level.oldest
|
||||
level.oldest = (level.oldest + 1) % ts.numBuckets
|
||||
ts.resetObservation(level.buckets[level.newest])
|
||||
}
|
||||
|
||||
t = level.end
|
||||
}
|
||||
}
|
||||
|
||||
// Latest returns the sum of the num latest buckets from the level.
|
||||
func (ts *timeSeries) Latest(level, num int) Observable {
|
||||
now := ts.clock.Time()
|
||||
if ts.levels[0].end.Before(now) {
|
||||
ts.advance(now)
|
||||
}
|
||||
|
||||
ts.mergePendingUpdates()
|
||||
|
||||
result := ts.provider()
|
||||
l := ts.levels[level]
|
||||
index := l.newest
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
if l.buckets[index] != nil {
|
||||
result.Add(l.buckets[index])
|
||||
}
|
||||
if index == 0 {
|
||||
index = ts.numBuckets
|
||||
}
|
||||
index--
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// LatestBuckets returns a copy of the num latest buckets from level.
|
||||
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
|
||||
if level < 0 || level > len(ts.levels) {
|
||||
log.Print("timeseries: bad level argument: ", level)
|
||||
return nil
|
||||
}
|
||||
if num < 0 || num >= ts.numBuckets {
|
||||
log.Print("timeseries: bad num argument: ", num)
|
||||
return nil
|
||||
}
|
||||
|
||||
results := make([]Observable, num)
|
||||
now := ts.clock.Time()
|
||||
if ts.levels[0].end.Before(now) {
|
||||
ts.advance(now)
|
||||
}
|
||||
|
||||
ts.mergePendingUpdates()
|
||||
|
||||
l := ts.levels[level]
|
||||
index := l.newest
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
result := ts.provider()
|
||||
results[i] = result
|
||||
if l.buckets[index] != nil {
|
||||
result.CopyFrom(l.buckets[index])
|
||||
}
|
||||
|
||||
if index == 0 {
|
||||
index = ts.numBuckets
|
||||
}
|
||||
index -= 1
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// ScaleBy updates observations by scaling by factor.
|
||||
func (ts *timeSeries) ScaleBy(factor float64) {
|
||||
for _, l := range ts.levels {
|
||||
for i := 0; i < ts.numBuckets; i++ {
|
||||
l.buckets[i].Multiply(factor)
|
||||
}
|
||||
}
|
||||
|
||||
ts.total.Multiply(factor)
|
||||
ts.pending.Multiply(factor)
|
||||
}
|
||||
|
||||
// Range returns the sum of observations added over the specified time range.
|
||||
// If start or finish times don't fall on bucket boundaries of the same
|
||||
// level, then return values are approximate answers.
|
||||
func (ts *timeSeries) Range(start, finish time.Time) Observable {
|
||||
return ts.ComputeRange(start, finish, 1)[0]
|
||||
}
|
||||
|
||||
// Recent returns the sum of observations from the last delta.
|
||||
func (ts *timeSeries) Recent(delta time.Duration) Observable {
|
||||
now := ts.clock.Time()
|
||||
return ts.Range(now.Add(-delta), now)
|
||||
}
|
||||
|
||||
// Total returns the total of all observations.
|
||||
func (ts *timeSeries) Total() Observable {
|
||||
ts.mergePendingUpdates()
|
||||
return ts.total
|
||||
}
|
||||
|
||||
// ComputeRange computes a specified number of values into a slice using
|
||||
// the observations recorded over the specified time period. The return
|
||||
// values are approximate if the start or finish times don't fall on the
|
||||
// bucket boundaries at the same level or if the number of buckets spanning
|
||||
// the range is not an integral multiple of num.
|
||||
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
|
||||
if start.After(finish) {
|
||||
log.Printf("timeseries: start > finish, %v>%v", start, finish)
|
||||
return nil
|
||||
}
|
||||
|
||||
if num < 0 {
|
||||
log.Printf("timeseries: num < 0, %v", num)
|
||||
return nil
|
||||
}
|
||||
|
||||
results := make([]Observable, num)
|
||||
|
||||
for _, l := range ts.levels {
|
||||
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
|
||||
ts.extract(l, start, finish, num, results)
|
||||
return results
|
||||
}
|
||||
}
|
||||
|
||||
// Failed to find a level that covers the desired range. So just
|
||||
// extract from the last level, even if it doesn't cover the entire
|
||||
// desired range.
|
||||
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// RecentList returns the specified number of values in slice over the most
|
||||
// recent time period of the specified range.
|
||||
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
|
||||
if delta < 0 {
|
||||
return nil
|
||||
}
|
||||
now := ts.clock.Time()
|
||||
return ts.ComputeRange(now.Add(-delta), now, num)
|
||||
}
|
||||
|
||||
// extract returns a slice of specified number of observations from a given
|
||||
// level over a given range.
|
||||
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
|
||||
ts.mergePendingUpdates()
|
||||
|
||||
srcInterval := l.size
|
||||
dstInterval := finish.Sub(start) / time.Duration(num)
|
||||
dstStart := start
|
||||
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
|
||||
|
||||
srcIndex := 0
|
||||
|
||||
// Where should scanning start?
|
||||
if dstStart.After(srcStart) {
|
||||
advance := dstStart.Sub(srcStart) / srcInterval
|
||||
srcIndex += int(advance)
|
||||
srcStart = srcStart.Add(advance * srcInterval)
|
||||
}
|
||||
|
||||
// The i'th value is computed as shown below.
|
||||
// interval = (finish - start) / num
|
||||
// i'th value = sum of observation in range
|
||||
// [ start + i * interval,
|
||||
// start + (i + 1) * interval )
|
||||
for i := 0; i < num; i++ {
|
||||
results[i] = ts.resetObservation(results[i])
|
||||
dstEnd := dstStart.Add(dstInterval)
|
||||
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
|
||||
srcEnd := srcStart.Add(srcInterval)
|
||||
if srcEnd.After(ts.lastAdd) {
|
||||
srcEnd = ts.lastAdd
|
||||
}
|
||||
|
||||
if !srcEnd.Before(dstStart) {
|
||||
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
|
||||
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
|
||||
// dst completely contains src.
|
||||
if srcValue != nil {
|
||||
results[i].Add(srcValue)
|
||||
}
|
||||
} else {
|
||||
// dst partially overlaps src.
|
||||
overlapStart := maxTime(srcStart, dstStart)
|
||||
overlapEnd := minTime(srcEnd, dstEnd)
|
||||
base := srcEnd.Sub(srcStart)
|
||||
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
|
||||
|
||||
used := ts.provider()
|
||||
if srcValue != nil {
|
||||
used.CopyFrom(srcValue)
|
||||
}
|
||||
used.Multiply(fraction)
|
||||
results[i].Add(used)
|
||||
}
|
||||
|
||||
if srcEnd.After(dstEnd) {
|
||||
break
|
||||
}
|
||||
}
|
||||
srcIndex++
|
||||
srcStart = srcStart.Add(srcInterval)
|
||||
}
|
||||
dstStart = dstStart.Add(dstInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// resetObservation clears the content so the struct may be reused.
|
||||
func (ts *timeSeries) resetObservation(observation Observable) Observable {
|
||||
if observation == nil {
|
||||
observation = ts.provider()
|
||||
} else {
|
||||
observation.Clear()
|
||||
}
|
||||
return observation
|
||||
}
|
||||
|
||||
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
|
||||
type TimeSeries struct {
|
||||
timeSeries
|
||||
}
|
||||
|
||||
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
|
||||
func NewTimeSeries(f func() Observable) *TimeSeries {
|
||||
return NewTimeSeriesWithClock(f, defaultClockInstance)
|
||||
}
|
||||
|
||||
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
|
||||
// assigning timestamps.
|
||||
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
|
||||
ts := new(TimeSeries)
|
||||
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
|
||||
return ts
|
||||
}
|
||||
|
||||
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
|
||||
type MinuteHourSeries struct {
|
||||
timeSeries
|
||||
}
|
||||
|
||||
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
|
||||
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
|
||||
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
|
||||
}
|
||||
|
||||
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
|
||||
// assigning timestamps.
|
||||
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
|
||||
ts := new(MinuteHourSeries)
|
||||
ts.timeSeries.init(minuteHourSeriesResolutions, f,
|
||||
minuteHourSeriesNumBuckets, clock)
|
||||
return ts
|
||||
}
|
||||
|
||||
func (ts *MinuteHourSeries) Minute() Observable {
|
||||
return ts.timeSeries.Latest(0, 60)
|
||||
}
|
||||
|
||||
func (ts *MinuteHourSeries) Hour() Observable {
|
||||
return ts.timeSeries.Latest(1, 60)
|
||||
}
|
||||
|
||||
func minTime(a, b time.Time) time.Time {
|
||||
if a.Before(b) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func maxTime(a, b time.Time) time.Time {
|
||||
if a.After(b) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
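For orientation, here is a hedged sketch of how the types above fit together. Note that golang.org/x/net/internal/timeseries is an internal package, so code outside x/net cannot import it; the snippet only mirrors the API defined in this file.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries" // importable only from within x/net
)

func main() {
	// Track float observations at the predefined resolutions (1s up to 16 weeks).
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	v := timeseries.Float(2.5)
	ts.Add(&v) // record 2.5 at the current time

	// Sum of everything observed over the last five minutes.
	recent := ts.Recent(5 * time.Minute)
	fmt.Println(recent.(*timeseries.Float).Value())
}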
524
vendor/src/golang.org/x/net/trace/events.go
vendored
Normal file
|
@@ -0,0 +1,524 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
)
|
||||
|
||||
var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
|
||||
"elapsed": elapsed,
|
||||
"trimSpace": strings.TrimSpace,
|
||||
}).Parse(eventsHTML))
|
||||
|
||||
const maxEventsPerLog = 100
|
||||
|
||||
type bucket struct {
|
||||
MaxErrAge time.Duration
|
||||
String string
|
||||
}
|
||||
|
||||
var buckets = []bucket{
|
||||
{0, "total"},
|
||||
{10 * time.Second, "errs<10s"},
|
||||
{1 * time.Minute, "errs<1m"},
|
||||
{10 * time.Minute, "errs<10m"},
|
||||
{1 * time.Hour, "errs<1h"},
|
||||
{10 * time.Hour, "errs<10h"},
|
||||
{24000 * time.Hour, "errors"},
|
||||
}
|
||||
|
||||
// RenderEvents renders the HTML page typically served at /debug/events.
|
||||
// It does not do any auth checking; see AuthRequest for the default auth check
|
||||
// used by the handler registered on http.DefaultServeMux.
|
||||
// req may be nil.
|
||||
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
|
||||
now := time.Now()
|
||||
data := &struct {
|
||||
Families []string // family names
|
||||
Buckets []bucket
|
||||
Counts [][]int // eventLog count per family/bucket
|
||||
|
||||
// Set when a bucket has been selected.
|
||||
Family string
|
||||
Bucket int
|
||||
EventLogs eventLogs
|
||||
Expanded bool
|
||||
}{
|
||||
Buckets: buckets,
|
||||
}
|
||||
|
||||
data.Families = make([]string, 0, len(families))
|
||||
famMu.RLock()
|
||||
for name := range families {
|
||||
data.Families = append(data.Families, name)
|
||||
}
|
||||
famMu.RUnlock()
|
||||
sort.Strings(data.Families)
|
||||
|
||||
// Count the number of eventLogs in each family for each error age.
|
||||
data.Counts = make([][]int, len(data.Families))
|
||||
for i, name := range data.Families {
|
||||
// TODO(sameer): move this loop under the family lock.
|
||||
f := getEventFamily(name)
|
||||
data.Counts[i] = make([]int, len(data.Buckets))
|
||||
for j, b := range data.Buckets {
|
||||
data.Counts[i][j] = f.Count(now, b.MaxErrAge)
|
||||
}
|
||||
}
|
||||
|
||||
if req != nil {
|
||||
var ok bool
|
||||
data.Family, data.Bucket, ok = parseEventsArgs(req)
|
||||
if !ok {
|
||||
// No-op
|
||||
} else {
|
||||
data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
|
||||
}
|
||||
if data.EventLogs != nil {
|
||||
defer data.EventLogs.Free()
|
||||
sort.Sort(data.EventLogs)
|
||||
}
|
||||
if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
|
||||
data.Expanded = exp
|
||||
}
|
||||
}
|
||||
|
||||
famMu.RLock()
|
||||
defer famMu.RUnlock()
|
||||
if err := eventsTmpl.Execute(w, data); err != nil {
|
||||
log.Printf("net/trace: Failed executing template: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
|
||||
fam, bStr := req.FormValue("fam"), req.FormValue("b")
|
||||
if fam == "" || bStr == "" {
|
||||
return "", 0, false
|
||||
}
|
||||
b, err := strconv.Atoi(bStr)
|
||||
if err != nil || b < 0 || b >= len(buckets) {
|
||||
return "", 0, false
|
||||
}
|
||||
return fam, b, true
|
||||
}
|
||||
|
||||
// An EventLog provides a log of events associated with a specific object.
|
||||
type EventLog interface {
|
||||
// Printf formats its arguments with fmt.Sprintf and adds the
|
||||
// result to the event log.
|
||||
Printf(format string, a ...interface{})
|
||||
|
||||
// Errorf is like Printf, but it marks this event as an error.
|
||||
Errorf(format string, a ...interface{})
|
||||
|
||||
// Finish declares that this event log is complete.
|
||||
// The event log should not be used after calling this method.
|
||||
Finish()
|
||||
}
|
||||
|
||||
// NewEventLog returns a new EventLog with the specified family name
|
||||
// and title.
|
||||
func NewEventLog(family, title string) EventLog {
|
||||
el := newEventLog()
|
||||
el.ref()
|
||||
el.Family, el.Title = family, title
|
||||
el.Start = time.Now()
|
||||
el.events = make([]logEntry, 0, maxEventsPerLog)
|
||||
el.stack = make([]uintptr, 32)
|
||||
n := runtime.Callers(2, el.stack)
|
||||
el.stack = el.stack[:n]
|
||||
|
||||
getEventFamily(family).add(el)
|
||||
return el
|
||||
}
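golang.org/x/net/trace is an importable package, so the EventLog API above can be used directly. A brief hedged sketch (the family and title strings are made up); the resulting logs are what RenderEvents serves at /debug/events:

package main

import "golang.org/x/net/trace"

func main() {
	// One event log per long-lived object, grouped under a family name.
	el := trace.NewEventLog("example.Conn", "conn-42")
	defer el.Finish()

	el.Printf("dialing %s", "10.0.0.1:443")
	el.Errorf("handshake failed, retrying") // marks the log as having a recent error
}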
|
||||
|
||||
func (el *eventLog) Finish() {
|
||||
getEventFamily(el.Family).remove(el)
|
||||
el.unref() // matches ref in New
|
||||
}
|
||||
|
||||
var (
|
||||
famMu sync.RWMutex
|
||||
families = make(map[string]*eventFamily) // family name => family
|
||||
)
|
||||
|
||||
func getEventFamily(fam string) *eventFamily {
|
||||
famMu.Lock()
|
||||
defer famMu.Unlock()
|
||||
f := families[fam]
|
||||
if f == nil {
|
||||
f = &eventFamily{}
|
||||
families[fam] = f
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
type eventFamily struct {
|
||||
mu sync.RWMutex
|
||||
eventLogs eventLogs
|
||||
}
|
||||
|
||||
func (f *eventFamily) add(el *eventLog) {
|
||||
f.mu.Lock()
|
||||
f.eventLogs = append(f.eventLogs, el)
|
||||
f.mu.Unlock()
|
||||
}
|
||||
|
||||
func (f *eventFamily) remove(el *eventLog) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
for i, el0 := range f.eventLogs {
|
||||
if el == el0 {
|
||||
copy(f.eventLogs[i:], f.eventLogs[i+1:])
|
||||
f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
for _, el := range f.eventLogs {
|
||||
if el.hasRecentError(now, maxErrAge) {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
els = make(eventLogs, 0, len(f.eventLogs))
|
||||
for _, el := range f.eventLogs {
|
||||
if el.hasRecentError(now, maxErrAge) {
|
||||
el.ref()
|
||||
els = append(els, el)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type eventLogs []*eventLog
|
||||
|
||||
// Free calls unref on each element of the list.
|
||||
func (els eventLogs) Free() {
|
||||
for _, el := range els {
|
||||
el.unref()
|
||||
}
|
||||
}
|
||||
|
||||
// eventLogs may be sorted in reverse chronological order.
|
||||
func (els eventLogs) Len() int { return len(els) }
|
||||
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
|
||||
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
|
||||
|
||||
// A logEntry is a timestamped log entry in an event log.
|
||||
type logEntry struct {
|
||||
When time.Time
|
||||
Elapsed time.Duration // since previous event in log
|
||||
NewDay bool // whether this event is on a different day to the previous event
|
||||
What string
|
||||
IsErr bool
|
||||
}
|
||||
|
||||
// WhenString returns a string representation of the elapsed time of the event.
|
||||
// It will include the date if midnight was crossed.
|
||||
func (e logEntry) WhenString() string {
|
||||
if e.NewDay {
|
||||
return e.When.Format("2006/01/02 15:04:05.000000")
|
||||
}
|
||||
return e.When.Format("15:04:05.000000")
|
||||
}
|
||||
|
||||
// An eventLog represents an active event log.
|
||||
type eventLog struct {
|
||||
// Family is the top-level grouping of event logs to which this belongs.
|
||||
Family string
|
||||
|
||||
// Title is the title of this event log.
|
||||
Title string
|
||||
|
||||
// Timing information.
|
||||
Start time.Time
|
||||
|
||||
// Call stack where this event log was created.
|
||||
stack []uintptr
|
||||
|
||||
// Append-only sequence of events.
|
||||
//
|
||||
// TODO(sameer): change this to a ring buffer to avoid the array copy
|
||||
// when we hit maxEventsPerLog.
|
||||
mu sync.RWMutex
|
||||
events []logEntry
|
||||
LastErrorTime time.Time
|
||||
discarded int
|
||||
|
||||
refs int32 // how many buckets this is in
|
||||
}
|
||||
|
||||
func (el *eventLog) reset() {
|
||||
// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
|
||||
el.Family = ""
|
||||
el.Title = ""
|
||||
el.Start = time.Time{}
|
||||
el.stack = nil
|
||||
el.events = nil
|
||||
el.LastErrorTime = time.Time{}
|
||||
el.discarded = 0
|
||||
el.refs = 0
|
||||
}
|
||||
|
||||
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
|
||||
if maxErrAge == 0 {
|
||||
return true
|
||||
}
|
||||
el.mu.RLock()
|
||||
defer el.mu.RUnlock()
|
||||
return now.Sub(el.LastErrorTime) < maxErrAge
|
||||
}
|
||||
|
||||
// delta returns the elapsed time since the last event or the log start,
|
||||
// and whether it spans midnight.
|
||||
// L >= el.mu
|
||||
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
|
||||
if len(el.events) == 0 {
|
||||
return t.Sub(el.Start), false
|
||||
}
|
||||
prev := el.events[len(el.events)-1].When
|
||||
return t.Sub(prev), prev.Day() != t.Day()
|
||||
|
||||
}
|
||||
|
||||
func (el *eventLog) Printf(format string, a ...interface{}) {
|
||||
el.printf(false, format, a...)
|
||||
}
|
||||
|
||||
func (el *eventLog) Errorf(format string, a ...interface{}) {
|
||||
el.printf(true, format, a...)
|
||||
}
|
||||
|
||||
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
|
||||
e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
|
||||
el.mu.Lock()
|
||||
e.Elapsed, e.NewDay = el.delta(e.When)
|
||||
if len(el.events) < maxEventsPerLog {
|
||||
el.events = append(el.events, e)
|
||||
} else {
|
||||
// Discard the oldest event.
|
||||
if el.discarded == 0 {
|
||||
// el.discarded starts at two to count for the event it
|
||||
// is replacing, plus the next one that we are about to
|
||||
// drop.
|
||||
el.discarded = 2
|
||||
} else {
|
||||
el.discarded++
|
||||
}
|
||||
// TODO(sameer): if this causes allocations on a critical path,
|
||||
// change eventLog.What to be a fmt.Stringer, as in trace.go.
|
||||
el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
|
||||
// The timestamp of the discarded meta-event should be
|
||||
// the time of the last event it is representing.
|
||||
el.events[0].When = el.events[1].When
|
||||
copy(el.events[1:], el.events[2:])
|
||||
el.events[maxEventsPerLog-1] = e
|
||||
}
|
||||
if e.IsErr {
|
||||
el.LastErrorTime = e.When
|
||||
}
|
||||
el.mu.Unlock()
|
||||
}
|
||||
|
||||
func (el *eventLog) ref() {
|
||||
atomic.AddInt32(&el.refs, 1)
|
||||
}
|
||||
|
||||
func (el *eventLog) unref() {
|
||||
if atomic.AddInt32(&el.refs, -1) == 0 {
|
||||
freeEventLog(el)
|
||||
}
|
||||
}
|
||||
|
||||
func (el *eventLog) When() string {
|
||||
return el.Start.Format("2006/01/02 15:04:05.000000")
|
||||
}
|
||||
|
||||
func (el *eventLog) ElapsedTime() string {
|
||||
elapsed := time.Since(el.Start)
|
||||
return fmt.Sprintf("%.6f", elapsed.Seconds())
|
||||
}
|
||||
|
||||
func (el *eventLog) Stack() string {
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
|
||||
printStackRecord(tw, el.stack)
|
||||
tw.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// printStackRecord prints the function + source line information
|
||||
// for a single stack trace.
|
||||
// Adapted from runtime/pprof/pprof.go.
|
||||
func printStackRecord(w io.Writer, stk []uintptr) {
|
||||
for _, pc := range stk {
|
||||
f := runtime.FuncForPC(pc)
|
||||
if f == nil {
|
||||
continue
|
||||
}
|
||||
file, line := f.FileLine(pc)
|
||||
name := f.Name()
|
||||
// Hide runtime.goexit and any runtime functions at the beginning.
|
||||
if strings.HasPrefix(name, "runtime.") {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
|
||||
}
|
||||
}
|
||||
|
||||
func (el *eventLog) Events() []logEntry {
|
||||
el.mu.RLock()
|
||||
defer el.mu.RUnlock()
|
||||
return el.events
|
||||
}
|
||||
|
||||
// freeEventLogs is a freelist of *eventLog
|
||||
var freeEventLogs = make(chan *eventLog, 1000)
|
||||
|
||||
// newEventLog returns an event log ready to use.
|
||||
func newEventLog() *eventLog {
|
||||
select {
|
||||
case el := <-freeEventLogs:
|
||||
return el
|
||||
default:
|
||||
return new(eventLog)
|
||||
}
|
||||
}
|
||||
|
||||
// freeEventLog adds el to freeEventLogs if there's room.
|
||||
// This is non-blocking.
|
||||
func freeEventLog(el *eventLog) {
|
||||
el.reset()
|
||||
select {
|
||||
case freeEventLogs <- el:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
const eventsHTML = `
|
||||
<html>
|
||||
<head>
|
||||
<title>events</title>
|
||||
</head>
|
||||
<style type="text/css">
|
||||
body {
|
||||
font-family: sans-serif;
|
||||
}
|
||||
table#req-status td.family {
|
||||
padding-right: 2em;
|
||||
}
|
||||
table#req-status td.active {
|
||||
padding-right: 1em;
|
||||
}
|
||||
table#req-status td.empty {
|
||||
color: #aaa;
|
||||
}
|
||||
table#reqs {
|
||||
margin-top: 1em;
|
||||
}
|
||||
table#reqs tr.first {
|
||||
{{if $.Expanded}}font-weight: bold;{{end}}
|
||||
}
|
||||
table#reqs td {
|
||||
font-family: monospace;
|
||||
}
|
||||
table#reqs td.when {
|
||||
text-align: right;
|
||||
white-space: nowrap;
|
||||
}
|
||||
table#reqs td.elapsed {
|
||||
padding: 0 0.5em;
|
||||
text-align: right;
|
||||
white-space: pre;
|
||||
width: 10em;
|
||||
}
|
||||
address {
|
||||
font-size: smaller;
|
||||
margin-top: 5em;
|
||||
}
|
||||
</style>
|
||||
<body>
|
||||
|
||||
<h1>/debug/events</h1>
|
||||
|
||||
<table id="req-status">
|
||||
{{range $i, $fam := .Families}}
|
||||
<tr>
|
||||
<td class="family">{{$fam}}</td>
|
||||
|
||||
{{range $j, $bucket := $.Buckets}}
|
||||
{{$n := index $.Counts $i $j}}
|
||||
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
|
||||
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
|
||||
[{{$n}} {{$bucket.String}}]
|
||||
{{if $n}}</a>{{end}}
|
||||
</td>
|
||||
{{end}}
|
||||
|
||||
</tr>{{end}}
|
||||
</table>
|
||||
|
||||
{{if $.EventLogs}}
|
||||
<hr />
|
||||
<h3>Family: {{$.Family}}</h3>
|
||||
|
||||
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
|
||||
[Summary]{{if $.Expanded}}</a>{{end}}
|
||||
|
||||
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
|
||||
[Expanded]{{if not $.Expanded}}</a>{{end}}
|
||||
|
||||
<table id="reqs">
|
||||
<tr><th>When</th><th>Elapsed</th></tr>
|
||||
{{range $el := $.EventLogs}}
|
||||
<tr class="first">
|
||||
<td class="when">{{$el.When}}</td>
|
||||
<td class="elapsed">{{$el.ElapsedTime}}</td>
|
||||
<td>{{$el.Title}}
|
||||
</tr>
|
||||
{{if $.Expanded}}
|
||||
<tr>
|
||||
<td class="when"></td>
|
||||
<td class="elapsed"></td>
|
||||
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
|
||||
</tr>
|
||||
{{range $el.Events}}
|
||||
<tr>
|
||||
<td class="when">{{.WhenString}}</td>
|
||||
<td class="elapsed">{{elapsed .Elapsed}}</td>
|
||||
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
</table>
|
||||
{{end}}
|
||||
</body>
|
||||
</html>
|
||||
`
|
356
vendor/src/golang.org/x/net/trace/histogram.go
vendored
Normal file
|
@@ -0,0 +1,356 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
// This file implements histogramming for RPC statistics collection.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log"
|
||||
"math"
|
||||
|
||||
"golang.org/x/net/internal/timeseries"
|
||||
)
|
||||
|
||||
const (
|
||||
bucketCount = 38
|
||||
)
|
||||
|
||||
// histogram keeps counts of values in buckets that are spaced
|
||||
// out in powers of 2: 0-1, 2-3, 4-7...
|
||||
// histogram implements timeseries.Observable
|
||||
type histogram struct {
|
||||
sum int64 // running total of measurements
|
||||
sumOfSquares float64 // running sum of squared measurements
|
||||
buckets []int64 // bucketed values for histogram
|
||||
value int // holds a single value as an optimization
|
||||
valueCount int64 // number of values recorded for single value
|
||||
}
|
||||
|
||||
// addMeasurement records a single value observation in the histogram.
|
||||
func (h *histogram) addMeasurement(value int64) {
|
||||
// TODO: assert invariant
|
||||
h.sum += value
|
||||
h.sumOfSquares += float64(value) * float64(value)
|
||||
|
||||
bucketIndex := getBucket(value)
|
||||
|
||||
if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
|
||||
h.value = bucketIndex
|
||||
h.valueCount++
|
||||
} else {
|
||||
h.allocateBuckets()
|
||||
h.buckets[bucketIndex]++
|
||||
}
|
||||
}
|
||||
|
||||
func (h *histogram) allocateBuckets() {
|
||||
if h.buckets == nil {
|
||||
h.buckets = make([]int64, bucketCount)
|
||||
h.buckets[h.value] = h.valueCount
|
||||
h.value = 0
|
||||
h.valueCount = -1
|
||||
}
|
||||
}
|
||||
|
||||
func log2(i int64) int {
|
||||
n := 0
|
||||
for ; i >= 0x100; i >>= 8 {
|
||||
n += 8
|
||||
}
|
||||
for ; i > 0; i >>= 1 {
|
||||
n += 1
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func getBucket(i int64) (index int) {
|
||||
index = log2(i) - 1
|
||||
if index < 0 {
|
||||
index = 0
|
||||
}
|
||||
if index >= bucketCount {
|
||||
index = bucketCount - 1
|
||||
}
|
||||
return
|
||||
}
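A worked example of the bucketing arithmetic above (a standalone sketch that copies the two helpers for illustration; it is not part of the vendored file): values 0-1 land in bucket 0, 2-3 in bucket 1, 4-7 in bucket 2, and so on, with each bucket spanning a power-of-two range.

package main

import "fmt"

// Local copies of the vendored helpers, for illustration only.
func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n++
	}
	return n
}

func getBucket(i int64) int {
	index := log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= 38 { // bucketCount
		index = 37
	}
	return index
}

func main() {
	for _, v := range []int64{0, 1, 3, 4, 7, 8, 255, 256} {
		fmt.Printf("value %3d -> bucket %d\n", v, getBucket(v))
	}
	// buckets: 0->0, 1->0, 3->1, 4->2, 7->2, 8->3, 255->7, 256->8
}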
|
||||
|
||||
// Total returns the number of recorded observations.
|
||||
func (h *histogram) total() (total int64) {
|
||||
if h.valueCount >= 0 {
|
||||
total = h.valueCount
|
||||
}
|
||||
for _, val := range h.buckets {
|
||||
total += int64(val)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Average returns the average value of recorded observations.
|
||||
func (h *histogram) average() float64 {
|
||||
t := h.total()
|
||||
if t == 0 {
|
||||
return 0
|
||||
}
|
||||
return float64(h.sum) / float64(t)
|
||||
}
|
||||
|
||||
// Variance returns the variance of recorded observations.
|
||||
func (h *histogram) variance() float64 {
|
||||
t := float64(h.total())
|
||||
if t == 0 {
|
||||
return 0
|
||||
}
|
||||
s := float64(h.sum) / t
|
||||
return h.sumOfSquares/t - s*s
|
||||
}
|
||||
|
||||
// StandardDeviation returns the standard deviation of recorded observations.
|
||||
func (h *histogram) standardDeviation() float64 {
|
||||
return math.Sqrt(h.variance())
|
||||
}
|
||||
|
||||
// PercentileBoundary estimates the value that the given fraction of recorded
|
||||
// observations are less than.
|
||||
func (h *histogram) percentileBoundary(percentile float64) int64 {
|
||||
total := h.total()
|
||||
|
||||
// Corner cases (make sure result is strictly less than Total())
|
||||
if total == 0 {
|
||||
return 0
|
||||
} else if total == 1 {
|
||||
return int64(h.average())
|
||||
}
|
||||
|
||||
percentOfTotal := round(float64(total) * percentile)
|
||||
var runningTotal int64
|
||||
|
||||
for i := range h.buckets {
|
||||
value := h.buckets[i]
|
||||
runningTotal += value
|
||||
if runningTotal == percentOfTotal {
|
||||
// We hit an exact bucket boundary. If the next bucket has data, it is a
|
||||
// good estimate of the value. If the bucket is empty, we interpolate the
|
||||
// midpoint between the next bucket's boundary and the next non-zero
|
||||
// bucket. If the remaining buckets are all empty, then we use the
|
||||
// boundary for the next bucket as the estimate.
|
||||
j := uint8(i + 1)
|
||||
min := bucketBoundary(j)
|
||||
if runningTotal < total {
|
||||
for h.buckets[j] == 0 {
|
||||
j++
|
||||
}
|
||||
}
|
||||
max := bucketBoundary(j)
|
||||
return min + round(float64(max-min)/2)
|
||||
} else if runningTotal > percentOfTotal {
|
||||
// The value is in this bucket. Interpolate the value.
|
||||
delta := runningTotal - percentOfTotal
|
||||
percentBucket := float64(value-delta) / float64(value)
|
||||
bucketMin := bucketBoundary(uint8(i))
|
||||
nextBucketMin := bucketBoundary(uint8(i + 1))
|
||||
bucketSize := nextBucketMin - bucketMin
|
||||
return bucketMin + round(percentBucket*float64(bucketSize))
|
||||
}
|
||||
}
|
||||
return bucketBoundary(bucketCount - 1)
|
||||
}
|
||||
|
||||
// Median returns the estimated median of the observed values.
|
||||
func (h *histogram) median() int64 {
|
||||
return h.percentileBoundary(0.5)
|
||||
}
|
||||
|
||||
// Add adds other to h.
|
||||
func (h *histogram) Add(other timeseries.Observable) {
|
||||
o := other.(*histogram)
|
||||
if o.valueCount == 0 {
|
||||
// Other histogram is empty
|
||||
} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
|
||||
// Both have a single bucketed value, aggregate them
|
||||
h.valueCount += o.valueCount
|
||||
} else {
|
||||
// Two different values necessitate buckets in this histogram
|
||||
h.allocateBuckets()
|
||||
if o.valueCount >= 0 {
|
||||
h.buckets[o.value] += o.valueCount
|
||||
} else {
|
||||
for i := range h.buckets {
|
||||
h.buckets[i] += o.buckets[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
h.sumOfSquares += o.sumOfSquares
|
||||
h.sum += o.sum
|
||||
}
|
||||
|
||||
// Clear resets the histogram to an empty state, removing all observed values.
|
||||
func (h *histogram) Clear() {
|
||||
h.buckets = nil
|
||||
h.value = 0
|
||||
h.valueCount = 0
|
||||
h.sum = 0
|
||||
h.sumOfSquares = 0
|
||||
}
|
||||
|
||||
// CopyFrom copies from other, which must be a *histogram, into h.
|
||||
func (h *histogram) CopyFrom(other timeseries.Observable) {
|
||||
o := other.(*histogram)
|
||||
if o.valueCount == -1 {
|
||||
h.allocateBuckets()
|
||||
copy(h.buckets, o.buckets)
|
||||
}
|
||||
h.sum = o.sum
|
||||
h.sumOfSquares = o.sumOfSquares
|
||||
h.value = o.value
|
||||
h.valueCount = o.valueCount
|
||||
}
|
||||
|
||||
// Multiply scales the histogram by the specified ratio.
|
||||
func (h *histogram) Multiply(ratio float64) {
|
||||
if h.valueCount == -1 {
|
||||
for i := range h.buckets {
|
||||
h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
|
||||
}
|
||||
} else {
|
||||
h.valueCount = int64(float64(h.valueCount) * ratio)
|
||||
}
|
||||
h.sum = int64(float64(h.sum) * ratio)
|
||||
h.sumOfSquares = h.sumOfSquares * ratio
|
||||
}
|
||||
|
||||
// New creates a new histogram.
|
||||
func (h *histogram) New() timeseries.Observable {
|
||||
r := new(histogram)
|
||||
r.Clear()
|
||||
return r
|
||||
}
|
||||
|
||||
func (h *histogram) String() string {
|
||||
return fmt.Sprintf("%d, %f, %d, %d, %v",
|
||||
h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
|
||||
}
|
||||
|
||||
// round returns the closest int64 to the argument
|
||||
func round(in float64) int64 {
|
||||
return int64(math.Floor(in + 0.5))
|
||||
}
|
||||
|
||||
// bucketBoundary returns the first value in the bucket.
|
||||
func bucketBoundary(bucket uint8) int64 {
|
||||
if bucket == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1 << bucket
|
||||
}
|
||||
|
||||
// bucketData holds data about a specific bucket for use in distTmpl.
|
||||
type bucketData struct {
|
||||
Lower, Upper int64
|
||||
N int64
|
||||
Pct, CumulativePct float64
|
||||
GraphWidth int
|
||||
}
|
||||
|
||||
// data holds data about a Distribution for use in distTmpl.
|
||||
type data struct {
|
||||
Buckets []*bucketData
|
||||
Count, Median int64
|
||||
Mean, StandardDeviation float64
|
||||
}
|
||||
|
||||
// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
|
||||
const maxHTMLBarWidth = 350.0
|
||||
|
||||
// newData returns data representing h for use in distTmpl.
|
||||
func (h *histogram) newData() *data {
|
||||
// Force the allocation of buckets to simplify the rendering implementation
|
||||
h.allocateBuckets()
|
||||
// We scale the bars on the right so that the largest bar is
|
||||
// maxHTMLBarWidth pixels in width.
|
||||
maxBucket := int64(0)
|
||||
for _, n := range h.buckets {
|
||||
if n > maxBucket {
|
||||
maxBucket = n
|
||||
}
|
||||
}
|
||||
total := h.total()
|
||||
barsizeMult := maxHTMLBarWidth / float64(maxBucket)
|
||||
var pctMult float64
|
||||
if total == 0 {
|
||||
pctMult = 1.0
|
||||
} else {
|
||||
pctMult = 100.0 / float64(total)
|
||||
}
|
||||
|
||||
buckets := make([]*bucketData, len(h.buckets))
|
||||
runningTotal := int64(0)
|
||||
for i, n := range h.buckets {
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
runningTotal += n
|
||||
var upperBound int64
|
||||
if i < bucketCount-1 {
|
||||
upperBound = bucketBoundary(uint8(i + 1))
|
||||
} else {
|
||||
upperBound = math.MaxInt64
|
||||
}
|
||||
buckets[i] = &bucketData{
|
||||
Lower: bucketBoundary(uint8(i)),
|
||||
Upper: upperBound,
|
||||
N: n,
|
||||
Pct: float64(n) * pctMult,
|
||||
CumulativePct: float64(runningTotal) * pctMult,
|
||||
GraphWidth: int(float64(n) * barsizeMult),
|
||||
}
|
||||
}
|
||||
return &data{
|
||||
Buckets: buckets,
|
||||
Count: total,
|
||||
Median: h.median(),
|
||||
Mean: h.average(),
|
||||
StandardDeviation: h.standardDeviation(),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *histogram) html() template.HTML {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := distTmpl.Execute(buf, h.newData()); err != nil {
|
||||
buf.Reset()
|
||||
log.Printf("net/trace: couldn't execute template: %v", err)
|
||||
}
|
||||
return template.HTML(buf.String())
|
||||
}
|
||||
|
||||
// Input: data
|
||||
var distTmpl = template.Must(template.New("distTmpl").Parse(`
|
||||
<table>
|
||||
<tr>
|
||||
<td style="padding:0.25em">Count: {{.Count}}</td>
|
||||
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
|
||||
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
|
||||
<td style="padding:0.25em">Median: {{.Median}}</td>
|
||||
</tr>
|
||||
</table>
|
||||
<hr>
|
||||
<table>
|
||||
{{range $b := .Buckets}}
|
||||
{{if $b}}
|
||||
<tr>
|
||||
<td style="padding:0 0 0 0.25em">[</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
|
||||
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
</table>
|
||||
`))
|
1057
vendor/src/golang.org/x/net/trace/trace.go
vendored
Normal file
File diff suppressed because it is too large
14
vendor/src/golang.org/x/oauth2/.travis.yml
vendored
Normal file
|
@@ -0,0 +1,14 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
|
||||
install:
|
||||
- export GOPATH="$HOME/gopath"
|
||||
- mkdir -p "$GOPATH/src/golang.org/x"
|
||||
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
|
||||
- go get -v -t -d golang.org/x/oauth2/...
|
||||
|
||||
script:
|
||||
- go test -v golang.org/x/oauth2/...
|
3
vendor/src/golang.org/x/oauth2/AUTHORS
vendored
Normal file
|
@@ -0,0 +1,3 @@
|
|||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
31
vendor/src/golang.org/x/oauth2/CONTRIBUTING.md
vendored
Normal file
|
@@ -0,0 +1,31 @@
|
|||
# Contributing to Go
|
||||
|
||||
Go is an open source project.
|
||||
|
||||
It is the work of hundreds of contributors. We appreciate your help!
|
||||
|
||||
|
||||
## Filing issues
|
||||
|
||||
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
|
||||
|
||||
1. What version of Go are you using (`go version`)?
|
||||
2. What operating system and processor architecture are you using?
|
||||
3. What did you do?
|
||||
4. What did you expect to see?
|
||||
5. What did you see instead?
|
||||
|
||||
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
|
||||
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
|
||||
|
||||
## Contributing code
|
||||
|
||||
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
|
||||
before sending patches.
|
||||
|
||||
**We do not accept GitHub pull requests**
|
||||
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
|
||||
|
||||
Unless otherwise noted, the Go source files are distributed under
|
||||
the BSD-style license found in the LICENSE file.
|
||||
|
3
vendor/src/golang.org/x/oauth2/CONTRIBUTORS
vendored
Normal file
|
@@ -0,0 +1,3 @@
|
|||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/src/golang.org/x/oauth2/LICENSE
vendored
Normal file
|
@@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The oauth2 Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
64
vendor/src/golang.org/x/oauth2/README.md
vendored
Normal file
|
@@ -0,0 +1,64 @@
|
|||
# OAuth2 for Go
|
||||
|
||||
[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
|
||||
|
||||
The oauth2 package contains a client implementation of the OAuth 2.0 spec.
|
||||
|
||||
## Installation
|
||||
|
||||
~~~~
|
||||
go get golang.org/x/oauth2
|
||||
~~~~
|
||||
|
||||
See godoc for further documentation and examples.
|
||||
|
||||
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
|
||||
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
|
||||
|
||||
|
||||
## App Engine
|
||||
|
||||
In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
|
||||
of the [`context.Context`](https://golang.org/x/net/context#Context) type from
|
||||
the `golang.org/x/net/context` package.
|
||||
|
||||
This means it's no longer possible to use the "Classic App Engine"
|
||||
`appengine.Context` type with the `oauth2` package. (You're using
|
||||
Classic App Engine if you import the package `"appengine"`.)
|
||||
|
||||
To work around this, you may use the new `"google.golang.org/appengine"`
|
||||
package. This package has almost the same API as the `"appengine"` package,
|
||||
but it can be fetched with `go get` and used on "Managed VMs" as well as
|
||||
Classic App Engine.
|
||||
|
||||
See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
|
||||
for information on updating your app.
|
||||
|
||||
If you don't want to update your entire app to use the new App Engine packages,
|
||||
you may use both sets of packages in parallel, using only the new packages
|
||||
with the `oauth2` package.
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
newappengine "google.golang.org/appengine"
|
||||
newurlfetch "google.golang.org/appengine/urlfetch"
|
||||
|
||||
"appengine"
|
||||
)
|
||||
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
var c appengine.Context = appengine.NewContext(r)
|
||||
c.Infof("Logging a message with the old package")
|
||||
|
||||
var ctx context.Context = newappengine.NewContext(r)
|
||||
client := &http.Client{
|
||||
Transport: &oauth2.Transport{
|
||||
Source: google.AppEngineTokenSource(ctx, "scope"),
|
||||
Base: &newurlfetch.Transport{Context: ctx},
|
||||
},
|
||||
}
|
||||
client.Get("...")
|
||||
}
|
||||
|
25
vendor/src/golang.org/x/oauth2/client_appengine.go
vendored
Normal file
|
@@ -0,0 +1,25 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
// App Engine hooks.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/internal"
|
||||
"google.golang.org/appengine/urlfetch"
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.RegisterContextClientFunc(contextClientAppEngine)
|
||||
}
|
||||
|
||||
func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
|
||||
return urlfetch.Client(ctx), nil
|
||||
}
|
86
vendor/src/golang.org/x/oauth2/google/appengine.go
vendored
Normal file
|
@@ -0,0 +1,86 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
|
||||
var appengineVM bool
|
||||
|
||||
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
|
||||
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
|
||||
|
||||
// AppEngineTokenSource returns a token source that fetches tokens
|
||||
// issued to the current App Engine application's service account.
|
||||
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
|
||||
// that involves user accounts, see oauth2.Config instead.
|
||||
//
|
||||
// The provided context must have come from appengine.NewContext.
|
||||
func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
|
||||
if appengineTokenFunc == nil {
|
||||
panic("google: AppEngineTokenSource can only be used on App Engine.")
|
||||
}
|
||||
scopes := append([]string{}, scope...)
|
||||
sort.Strings(scopes)
|
||||
return &appEngineTokenSource{
|
||||
ctx: ctx,
|
||||
scopes: scopes,
|
||||
key: strings.Join(scopes, " "),
|
||||
}
|
||||
}
|
||||
|
||||
// aeTokens caches fetched tokens so they can be reused until they expire.
|
||||
var (
|
||||
aeTokensMu sync.Mutex
|
||||
aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
|
||||
)
|
||||
|
||||
type tokenLock struct {
|
||||
mu sync.Mutex // guards t; held while fetching or updating t
|
||||
t *oauth2.Token
|
||||
}
|
||||
|
||||
type appEngineTokenSource struct {
|
||||
ctx context.Context
|
||||
scopes []string
|
||||
key string // to aeTokens map; space-separated scopes
|
||||
}
|
||||
|
||||
func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
|
||||
if appengineTokenFunc == nil {
|
||||
panic("google: AppEngineTokenSource can only be used on App Engine.")
|
||||
}
|
||||
|
||||
aeTokensMu.Lock()
|
||||
tok, ok := aeTokens[ts.key]
|
||||
if !ok {
|
||||
tok = &tokenLock{}
|
||||
aeTokens[ts.key] = tok
|
||||
}
|
||||
aeTokensMu.Unlock()
|
||||
|
||||
tok.mu.Lock()
|
||||
defer tok.mu.Unlock()
|
||||
if tok.t.Valid() {
|
||||
return tok.t, nil
|
||||
}
|
||||
access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tok.t = &oauth2.Token{
|
||||
AccessToken: access,
|
||||
Expiry: exp,
|
||||
}
|
||||
return tok.t, nil
|
||||
}
|
13
vendor/src/golang.org/x/oauth2/google/appengine_hook.go
vendored
Normal file
|
@@ -0,0 +1,13 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package google
|
||||
|
||||
import "google.golang.org/appengine"
|
||||
|
||||
func init() {
|
||||
appengineTokenFunc = appengine.AccessToken
|
||||
}
|
14
vendor/src/golang.org/x/oauth2/google/appenginevm_hook.go
vendored
Normal file
|
@@ -0,0 +1,14 @@
|
|||
// Copyright 2015 The oauth2 Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appenginevm
|
||||
|
||||
package google
|
||||
|
||||
import "google.golang.org/appengine"
|
||||
|
||||
func init() {
|
||||
appengineVM = true
|
||||
appengineTokenFunc = appengine.AccessToken
|
||||
}
|
155
vendor/src/golang.org/x/oauth2/google/default.go
vendored
Normal file
|
@@ -0,0 +1,155 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/cloud/compute/metadata"
|
||||
)
|
||||
|
||||
// DefaultClient returns an HTTP Client that uses the
|
||||
// DefaultTokenSource to obtain authentication credentials.
|
||||
//
|
||||
// This client should be used when developing services
|
||||
// that run on Google App Engine or Google Compute Engine
|
||||
// and use "Application Default Credentials."
|
||||
//
|
||||
// For more details, see:
|
||||
// https://developers.google.com/accounts/docs/application-default-credentials
|
||||
//
|
||||
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
|
||||
ts, err := DefaultTokenSource(ctx, scope...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return oauth2.NewClient(ctx, ts), nil
|
||||
}
|
||||
|
||||
// DefaultTokenSource is a token source that uses
|
||||
// "Application Default Credentials".
|
||||
//
|
||||
// It looks for credentials in the following places,
|
||||
// preferring the first location found:
|
||||
//
|
||||
// 1. A JSON file whose path is specified by the
|
||||
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
|
||||
// 2. A JSON file in a location known to the gcloud command-line tool.
|
||||
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
|
||||
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
|
||||
// 3. On Google App Engine it uses the appengine.AccessToken function.
|
||||
// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
|
||||
// credentials from the metadata server.
|
||||
// (In this final case any provided scopes are ignored.)
|
||||
//
|
||||
// For more details, see:
|
||||
// https://developers.google.com/accounts/docs/application-default-credentials
|
||||
//
|
||||
func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
|
||||
// First, try the environment variable.
|
||||
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
|
||||
if filename := os.Getenv(envVar); filename != "" {
|
||||
ts, err := tokenSourceFromFile(ctx, filename, scope)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// Second, try a well-known file.
|
||||
filename := wellKnownFile()
|
||||
_, err := os.Stat(filename)
|
||||
if err == nil {
|
||||
ts, err2 := tokenSourceFromFile(ctx, filename, scope)
|
||||
if err2 == nil {
|
||||
return ts, nil
|
||||
}
|
||||
err = err2
|
||||
} else if os.IsNotExist(err) {
|
||||
err = nil // ignore this error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
|
||||
}
|
||||
|
||||
// Third, if we're on Google App Engine use those credentials.
|
||||
if appengineTokenFunc != nil && !appengineVM {
|
||||
return AppEngineTokenSource(ctx, scope...), nil
|
||||
}
|
||||
|
||||
// Fourth, if we're on Google Compute Engine use the metadata server.
|
||||
if metadata.OnGCE() {
|
||||
return ComputeTokenSource(""), nil
|
||||
}
|
||||
|
||||
// None are found; return helpful error.
|
||||
const url = "https://developers.google.com/accounts/docs/application-default-credentials"
|
||||
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
|
||||
}
|
||||
|
||||
func wellKnownFile() string {
|
||||
const f = "application_default_credentials.json"
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
|
||||
}
|
||||
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
|
||||
}
|
||||
|
||||
func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var d struct {
|
||||
// Common fields
|
||||
Type string
|
||||
ClientID string `json:"client_id"`
|
||||
|
||||
// User Credential fields
|
||||
ClientSecret string `json:"client_secret"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
|
||||
// Service Account fields
|
||||
ClientEmail string `json:"client_email"`
|
||||
PrivateKeyID string `json:"private_key_id"`
|
||||
PrivateKey string `json:"private_key"`
|
||||
}
|
||||
if err := json.Unmarshal(b, &d); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch d.Type {
|
||||
case "authorized_user":
|
||||
cfg := &oauth2.Config{
|
||||
ClientID: d.ClientID,
|
||||
ClientSecret: d.ClientSecret,
|
||||
Scopes: append([]string{}, scopes...), // copy
|
||||
Endpoint: Endpoint,
|
||||
}
|
||||
tok := &oauth2.Token{RefreshToken: d.RefreshToken}
|
||||
return cfg.TokenSource(ctx, tok), nil
|
||||
case "service_account":
|
||||
cfg := &jwt.Config{
|
||||
Email: d.ClientEmail,
|
||||
PrivateKey: []byte(d.PrivateKey),
|
||||
Scopes: append([]string{}, scopes...), // copy
|
||||
TokenURL: JWTTokenURL,
|
||||
}
|
||||
return cfg.TokenSource(ctx), nil
|
||||
case "":
|
||||
return nil, errors.New("missing 'type' field in credentials")
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown credential type: %q", d.Type)
|
||||
}
|
||||
}
|
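For context, here is a minimal sketch of how a caller consumes the lookup order described in the DefaultTokenSource comment above; the scope, request URL and project name are placeholder assumptions, not part of this change.

// Minimal sketch, assuming Application Default Credentials exist in one of
// the four locations listed above.
package main

import (
    "fmt"
    "log"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)

func main() {
    // The scope URL is an example value, not something this change defines.
    client, err := google.DefaultClient(oauth2.NoContext,
        "https://www.googleapis.com/auth/devstorage.read_only")
    if err != nil {
        log.Fatal(err) // e.g. no credentials found; the error points at setup docs
    }
    // The returned client injects a Bearer token into every request.
    resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=my-project") // placeholder URL/project
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}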
145
vendor/src/golang.org/x/oauth2/google/google.go
vendored
Normal file
|
@@ -0,0 +1,145 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package google provides support for making OAuth2 authorized and
|
||||
// authenticated HTTP requests to Google APIs.
|
||||
// It supports the Web server flow, client-side credentials, service accounts,
|
||||
// Google Compute Engine service accounts, and Google App Engine service
|
||||
// accounts.
|
||||
//
|
||||
// For more information, please read
|
||||
// https://developers.google.com/accounts/docs/OAuth2
|
||||
// and
|
||||
// https://developers.google.com/accounts/docs/application-default-credentials.
|
||||
package google // import "golang.org/x/oauth2/google"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/cloud/compute/metadata"
|
||||
)
|
||||
|
||||
// Endpoint is Google's OAuth 2.0 endpoint.
|
||||
var Endpoint = oauth2.Endpoint{
|
||||
AuthURL: "https://accounts.google.com/o/oauth2/auth",
|
||||
TokenURL: "https://accounts.google.com/o/oauth2/token",
|
||||
}
|
||||
|
||||
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
|
||||
const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
|
||||
|
||||
// ConfigFromJSON uses a Google Developers Console client_credentials.json
|
||||
// file to construct a config.
|
||||
// client_credentials.json can be downloaded from https://console.developers.google.com,
|
||||
// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
|
||||
// JSON format and provide the contents of the file as jsonKey.
|
||||
func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
|
||||
type cred struct {
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
RedirectURIs []string `json:"redirect_uris"`
|
||||
AuthURI string `json:"auth_uri"`
|
||||
TokenURI string `json:"token_uri"`
|
||||
}
|
||||
var j struct {
|
||||
Web *cred `json:"web"`
|
||||
Installed *cred `json:"installed"`
|
||||
}
|
||||
if err := json.Unmarshal(jsonKey, &j); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var c *cred
|
||||
switch {
|
||||
case j.Web != nil:
|
||||
c = j.Web
|
||||
case j.Installed != nil:
|
||||
c = j.Installed
|
||||
default:
|
||||
return nil, fmt.Errorf("oauth2/google: no credentials found")
|
||||
}
|
||||
if len(c.RedirectURIs) < 1 {
|
||||
return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
|
||||
}
|
||||
return &oauth2.Config{
|
||||
ClientID: c.ClientID,
|
||||
ClientSecret: c.ClientSecret,
|
||||
RedirectURL: c.RedirectURIs[0],
|
||||
Scopes: scope,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: c.AuthURI,
|
||||
TokenURL: c.TokenURI,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
|
||||
// the credentials that authorize and authenticate the requests.
|
||||
// Create a service account on "Credentials" page under "APIs & Auth" for your
|
||||
// project at https://console.developers.google.com to download a JSON key file.
|
||||
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
|
||||
var key struct {
|
||||
Email string `json:"client_email"`
|
||||
PrivateKey string `json:"private_key"`
|
||||
}
|
||||
if err := json.Unmarshal(jsonKey, &key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &jwt.Config{
|
||||
Email: key.Email,
|
||||
PrivateKey: []byte(key.PrivateKey),
|
||||
Scopes: scope,
|
||||
TokenURL: JWTTokenURL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ComputeTokenSource returns a token source that fetches access tokens
|
||||
// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
|
||||
// this token source if your program is running on a GCE instance.
|
||||
// If no account is specified, "default" is used.
|
||||
// Further information about retrieving access tokens from the GCE metadata
|
||||
// server can be found at https://cloud.google.com/compute/docs/authentication.
|
||||
func ComputeTokenSource(account string) oauth2.TokenSource {
|
||||
return oauth2.ReuseTokenSource(nil, computeSource{account: account})
|
||||
}
|
||||
|
||||
type computeSource struct {
|
||||
account string
|
||||
}
|
||||
|
||||
func (cs computeSource) Token() (*oauth2.Token, error) {
|
||||
if !metadata.OnGCE() {
|
||||
return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
|
||||
}
|
||||
acct := cs.account
|
||||
if acct == "" {
|
||||
acct = "default"
|
||||
}
|
||||
tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresInSec int `json:"expires_in"`
|
||||
TokenType string `json:"token_type"`
|
||||
}
|
||||
err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
|
||||
}
|
||||
if res.ExpiresInSec == 0 || res.AccessToken == "" {
|
||||
return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
|
||||
}
|
||||
return &oauth2.Token{
|
||||
AccessToken: res.AccessToken,
|
||||
TokenType: res.TokenType,
|
||||
Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
|
||||
}, nil
|
||||
}
|
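A minimal sketch of the service-account path through JWTConfigFromJSON; the key-file path, scope and request URL below are placeholders, not values defined by this change.

// Minimal sketch, assuming a service-account key file downloaded from the
// Developers Console.
package main

import (
    "io/ioutil"
    "log"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)

func main() {
    data, err := ioutil.ReadFile("/path/to/service-account.json") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    // JWTConfigFromJSON pulls client_email and private_key out of the key file.
    conf, err := google.JWTConfigFromJSON(data,
        "https://www.googleapis.com/auth/devstorage.read_only") // placeholder scope
    if err != nil {
        log.Fatal(err)
    }
    // Client wires the two-legged JWT flow (see jwt/jwt.go) into an *http.Client.
    client := conf.Client(oauth2.NoContext)
    _, _ = client.Get("https://www.googleapis.com/storage/v1/b?project=my-project") // placeholder URL
}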
71
vendor/src/golang.org/x/oauth2/google/jwt.go
vendored
Normal file
|
@@ -0,0 +1,71 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/internal"
|
||||
"golang.org/x/oauth2/jws"
|
||||
)
|
||||
|
||||
// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
|
||||
// key file to read the credentials that authorize and authenticate the
|
||||
// requests, and returns a TokenSource that does not use any OAuth2 flow but
|
||||
// instead creates a JWT and sends that as the access token.
|
||||
// The audience is typically a URL that specifies the scope of the credentials.
|
||||
//
|
||||
// Note that this is not a standard OAuth flow, but rather an
|
||||
// optimization supported by a few Google services.
|
||||
// Unless you know otherwise, you should use JWTConfigFromJSON instead.
|
||||
func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
|
||||
cfg, err := JWTConfigFromJSON(jsonKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
|
||||
}
|
||||
pk, err := internal.ParseKey(cfg.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: could not parse key: %v", err)
|
||||
}
|
||||
ts := &jwtAccessTokenSource{
|
||||
email: cfg.Email,
|
||||
audience: audience,
|
||||
pk: pk,
|
||||
}
|
||||
tok, err := ts.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return oauth2.ReuseTokenSource(tok, ts), nil
|
||||
}
|
||||
|
||||
type jwtAccessTokenSource struct {
|
||||
email, audience string
|
||||
pk *rsa.PrivateKey
|
||||
}
|
||||
|
||||
func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
|
||||
iat := time.Now()
|
||||
exp := iat.Add(time.Hour)
|
||||
cs := &jws.ClaimSet{
|
||||
Iss: ts.email,
|
||||
Sub: ts.email,
|
||||
Aud: ts.audience,
|
||||
Iat: iat.Unix(),
|
||||
Exp: exp.Unix(),
|
||||
}
|
||||
hdr := &jws.Header{
|
||||
Algorithm: "RS256",
|
||||
Typ: "JWT",
|
||||
}
|
||||
msg, err := jws.Encode(hdr, cs, ts.pk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: could not encode JWT: %v", err)
|
||||
}
|
||||
return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
|
||||
}
|
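A minimal sketch of the self-signed JWT path described above, which skips the token endpoint entirely; the key-file path and audience URL are placeholder assumptions.

// Minimal sketch, assuming a service-account key file and a service that
// accepts self-signed JWT access tokens.
package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "golang.org/x/oauth2/google"
)

func main() {
    data, err := ioutil.ReadFile("/path/to/service-account.json") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    // No round trip to an OAuth token endpoint: the token is a locally signed JWT.
    ts, err := google.JWTAccessTokenSourceFromJSON(data, "https://example.googleapis.com/") // placeholder audience
    if err != nil {
        log.Fatal(err)
    }
    tok, err := ts.Token()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tok.TokenType, tok.Expiry) // "Bearer" and roughly one hour from now
}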
168
vendor/src/golang.org/x/oauth2/google/sdk.go
vendored
Normal file
|
@@ -0,0 +1,168 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
type sdkCredentials struct {
|
||||
Data []struct {
|
||||
Credential struct {
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
TokenExpiry *time.Time `json:"token_expiry"`
|
||||
} `json:"credential"`
|
||||
Key struct {
|
||||
Account string `json:"account"`
|
||||
Scope string `json:"scope"`
|
||||
} `json:"key"`
|
||||
}
|
||||
}
|
||||
|
||||
// An SDKConfig provides access to tokens from an account already
|
||||
// authorized via the Google Cloud SDK.
|
||||
type SDKConfig struct {
|
||||
conf oauth2.Config
|
||||
initialToken *oauth2.Token
|
||||
}
|
||||
|
||||
// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
|
||||
// account. If account is empty, the account currently active in
|
||||
// Google Cloud SDK properties is used.
|
||||
// Google Cloud SDK credentials must be created by running `gcloud auth`
|
||||
// before using this function.
|
||||
// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
|
||||
func NewSDKConfig(account string) (*SDKConfig, error) {
|
||||
configPath, err := sdkConfigPath()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
|
||||
}
|
||||
credentialsPath := filepath.Join(configPath, "credentials")
|
||||
f, err := os.Open(credentialsPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var c sdkCredentials
|
||||
if err := json.NewDecoder(f).Decode(&c); err != nil {
|
||||
return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
|
||||
}
|
||||
if len(c.Data) == 0 {
|
||||
return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
|
||||
}
|
||||
if account == "" {
|
||||
propertiesPath := filepath.Join(configPath, "properties")
|
||||
f, err := os.Open(propertiesPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
ini, err := internal.ParseINI(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
|
||||
}
|
||||
core, ok := ini["core"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
|
||||
}
|
||||
active, ok := core["account"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
|
||||
}
|
||||
account = active
|
||||
}
|
||||
|
||||
for _, d := range c.Data {
|
||||
if account == "" || d.Key.Account == account {
|
||||
if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
|
||||
return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
|
||||
}
|
||||
var expiry time.Time
|
||||
if d.Credential.TokenExpiry != nil {
|
||||
expiry = *d.Credential.TokenExpiry
|
||||
}
|
||||
return &SDKConfig{
|
||||
conf: oauth2.Config{
|
||||
ClientID: d.Credential.ClientID,
|
||||
ClientSecret: d.Credential.ClientSecret,
|
||||
Scopes: strings.Split(d.Key.Scope, " "),
|
||||
Endpoint: Endpoint,
|
||||
RedirectURL: "oob",
|
||||
},
|
||||
initialToken: &oauth2.Token{
|
||||
AccessToken: d.Credential.AccessToken,
|
||||
RefreshToken: d.Credential.RefreshToken,
|
||||
Expiry: expiry,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
|
||||
}
|
||||
|
||||
// Client returns an HTTP client using Google Cloud SDK credentials to
|
||||
// authorize requests. The token will auto-refresh as necessary. The
|
||||
// underlying http.RoundTripper will be obtained using the provided
|
||||
// context. The returned client and its Transport should not be
|
||||
// modified.
|
||||
func (c *SDKConfig) Client(ctx context.Context) *http.Client {
|
||||
return &http.Client{
|
||||
Transport: &oauth2.Transport{
|
||||
Source: c.TokenSource(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TokenSource returns an oauth2.TokenSource that retrieves tokens from
|
||||
// Google Cloud SDK credentials using the provided context.
|
||||
// It returns the current access token stored in the credentials,
|
||||
// and refresh it when it expires, but it won't update the credentials
|
||||
// with the new access token.
|
||||
func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
|
||||
return c.conf.TokenSource(ctx, c.initialToken)
|
||||
}
|
||||
|
||||
// Scopes are the OAuth 2.0 scopes the current account is authorized for.
|
||||
func (c *SDKConfig) Scopes() []string {
|
||||
return c.conf.Scopes
|
||||
}
|
||||
|
||||
// sdkConfigPath tries to guess where the gcloud config is located.
|
||||
// It can be overridden during tests.
|
||||
var sdkConfigPath = func() (string, error) {
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
|
||||
}
|
||||
homeDir := guessUnixHomeDir()
|
||||
if homeDir == "" {
|
||||
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
|
||||
}
|
||||
return filepath.Join(homeDir, ".config", "gcloud"), nil
|
||||
}
|
||||
|
||||
func guessUnixHomeDir() string {
|
||||
usr, err := user.Current()
|
||||
if err == nil {
|
||||
return usr.HomeDir
|
||||
}
|
||||
return os.Getenv("HOME")
|
||||
}
|
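A minimal sketch of reusing gcloud credentials via NewSDKConfig, assuming `gcloud auth login` has already populated the SDK config directory; nothing here is specific to this change beyond the functions shown above.

// Minimal sketch; requires existing Google Cloud SDK credentials on disk.
package main

import (
    "fmt"
    "log"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)

func main() {
    // An empty account name means "whichever account is active in gcloud properties".
    conf, err := google.NewSDKConfig("")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("scopes:", conf.Scopes())
    // The client refreshes the stored access token as needed, but never writes
    // the refreshed token back to the SDK credentials file.
    client := conf.Client(oauth2.NoContext)
    _ = client
}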
76
vendor/src/golang.org/x/oauth2/internal/oauth2.go
vendored
Normal file
|
@@ -0,0 +1,76 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseKey converts the binary contents of a private key file
|
||||
// to an *rsa.PrivateKey. It detects whether the private key is in a
|
||||
// PEM container or not. If so, it extracts the private key
|
||||
// from PEM container before conversion. It only supports PEM
|
||||
// containers with no passphrase.
|
||||
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
|
||||
block, _ := pem.Decode(key)
|
||||
if block != nil {
|
||||
key = block.Bytes
|
||||
}
|
||||
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
|
||||
if err != nil {
|
||||
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
|
||||
}
|
||||
}
|
||||
parsed, ok := parsedKey.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errors.New("private key is invalid")
|
||||
}
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
|
||||
result := map[string]map[string]string{
|
||||
"": map[string]string{}, // root section
|
||||
}
|
||||
scanner := bufio.NewScanner(ini)
|
||||
currentSection := ""
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if strings.HasPrefix(line, ";") {
|
||||
// comment.
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
|
||||
currentSection = strings.TrimSpace(line[1 : len(line)-1])
|
||||
result[currentSection] = map[string]string{}
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
if len(parts) == 2 && parts[0] != "" {
|
||||
result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error scanning ini: %v", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func CondVal(v string) []string {
|
||||
if v == "" {
|
||||
return nil
|
||||
}
|
||||
return []string{v}
|
||||
}
|
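A test-style sketch of ParseINI on input shaped like the gcloud properties file that google/sdk.go reads; since this package is internal to golang.org/x/oauth2, such a snippet would have to live alongside the package itself.

// Example-style sketch, placed in the same package as ParseINI.
package internal

import (
    "fmt"
    "strings"
)

func ExampleParseINI() {
    // The same shape as the gcloud "properties" file read by google/sdk.go.
    src := "[core]\naccount = someone@example.com\n; a comment\n"
    ini, err := ParseINI(strings.NewReader(src))
    if err != nil {
        panic(err)
    }
    fmt.Println(ini["core"]["account"])
    // Output: someone@example.com
}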
221
vendor/src/golang.org/x/oauth2/internal/token.go
vendored
Normal file
|
@@ -0,0 +1,221 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Token represents the credentials used to authorize
|
||||
// the requests to access protected resources on the OAuth 2.0
|
||||
// provider's backend.
|
||||
//
|
||||
// This type is a mirror of oauth2.Token and exists to break
|
||||
// an otherwise-circular dependency. Other internal packages
|
||||
// should convert this Token into an oauth2.Token before use.
|
||||
type Token struct {
|
||||
// AccessToken is the token that authorizes and authenticates
|
||||
// the requests.
|
||||
AccessToken string
|
||||
|
||||
// TokenType is the type of token.
|
||||
// The Type method returns either this or "Bearer", the default.
|
||||
TokenType string
|
||||
|
||||
// RefreshToken is a token that's used by the application
|
||||
// (as opposed to the user) to refresh the access token
|
||||
// if it expires.
|
||||
RefreshToken string
|
||||
|
||||
// Expiry is the optional expiration time of the access token.
|
||||
//
|
||||
// If zero, TokenSource implementations will reuse the same
|
||||
// token forever and RefreshToken or equivalent
|
||||
// mechanisms for that TokenSource will not be used.
|
||||
Expiry time.Time
|
||||
|
||||
// Raw optionally contains extra metadata from the server
|
||||
// when updating a token.
|
||||
Raw interface{}
|
||||
}
|
||||
|
||||
// tokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type tokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
|
||||
}
|
||||
|
||||
func (e *tokenJSON) expiry() (t time.Time) {
|
||||
if v := e.ExpiresIn; v != 0 {
|
||||
return time.Now().Add(time.Duration(v) * time.Second)
|
||||
}
|
||||
if v := e.Expires; v != 0 {
|
||||
return time.Now().Add(time.Duration(v) * time.Second)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type expirationTime int32
|
||||
|
||||
func (e *expirationTime) UnmarshalJSON(b []byte) error {
|
||||
var n json.Number
|
||||
err := json.Unmarshal(b, &n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i, err := n.Int64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*e = expirationTime(i)
|
||||
return nil
|
||||
}
|
||||
|
||||
var brokenAuthHeaderProviders = []string{
|
||||
"https://accounts.google.com/",
|
||||
"https://api.dropbox.com/",
|
||||
"https://api.instagram.com/",
|
||||
"https://api.netatmo.net/",
|
||||
"https://api.odnoklassniki.ru/",
|
||||
"https://api.pushbullet.com/",
|
||||
"https://api.soundcloud.com/",
|
||||
"https://api.twitch.tv/",
|
||||
"https://app.box.com/",
|
||||
"https://connect.stripe.com/",
|
||||
"https://login.microsoftonline.com/",
|
||||
"https://login.salesforce.com/",
|
||||
"https://oauth.sandbox.trainingpeaks.com/",
|
||||
"https://oauth.trainingpeaks.com/",
|
||||
"https://oauth.vk.com/",
|
||||
"https://slack.com/",
|
||||
"https://test-sandbox.auth.corp.google.com",
|
||||
"https://test.salesforce.com/",
|
||||
"https://user.gini.net/",
|
||||
"https://www.douban.com/",
|
||||
"https://www.googleapis.com/",
|
||||
"https://www.linkedin.com/",
|
||||
"https://www.strava.com/oauth/",
|
||||
}
|
||||
|
||||
func RegisterBrokenAuthHeaderProvider(tokenURL string) {
|
||||
brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
|
||||
}
|
||||
|
||||
// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
|
||||
// implements the OAuth2 spec correctly
|
||||
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
|
||||
// In summary:
|
||||
// - Reddit only accepts client secret in the Authorization header
|
||||
// - Dropbox accepts either it in URL param or Auth header, but not both.
|
||||
// - Google only accepts URL param (not spec compliant?), not Auth header
|
||||
// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
|
||||
func providerAuthHeaderWorks(tokenURL string) bool {
|
||||
for _, s := range brokenAuthHeaderProviders {
|
||||
if strings.HasPrefix(tokenURL, s) {
|
||||
// Some sites fail to implement the OAuth2 spec fully.
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Assume the provider implements the spec properly
|
||||
// otherwise. We can add more exceptions as they're
|
||||
// discovered. We will _not_ be adding configurable hooks
|
||||
// to this package to let users select server bugs.
|
||||
return true
|
||||
}
|
||||
|
||||
func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
|
||||
hc, err := ContextClient(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v.Set("client_id", ClientID)
|
||||
bustedAuth := !providerAuthHeaderWorks(TokenURL)
|
||||
if bustedAuth && ClientSecret != "" {
|
||||
v.Set("client_secret", ClientSecret)
|
||||
}
|
||||
req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
if !bustedAuth {
|
||||
req.SetBasicAuth(ClientID, ClientSecret)
|
||||
}
|
||||
r, err := hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Body.Close()
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
|
||||
}
|
||||
if code := r.StatusCode; code < 200 || code > 299 {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
|
||||
}
|
||||
|
||||
var token *Token
|
||||
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
|
||||
switch content {
|
||||
case "application/x-www-form-urlencoded", "text/plain":
|
||||
vals, err := url.ParseQuery(string(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
token = &Token{
|
||||
AccessToken: vals.Get("access_token"),
|
||||
TokenType: vals.Get("token_type"),
|
||||
RefreshToken: vals.Get("refresh_token"),
|
||||
Raw: vals,
|
||||
}
|
||||
e := vals.Get("expires_in")
|
||||
if e == "" {
|
||||
// TODO(jbd): Facebook's OAuth2 implementation is broken and
|
||||
// returns expires_in field in expires. Remove the fallback to expires,
|
||||
// when Facebook fixes their implementation.
|
||||
e = vals.Get("expires")
|
||||
}
|
||||
expires, _ := strconv.Atoi(e)
|
||||
if expires != 0 {
|
||||
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
|
||||
}
|
||||
default:
|
||||
var tj tokenJSON
|
||||
if err = json.Unmarshal(body, &tj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
token = &Token{
|
||||
AccessToken: tj.AccessToken,
|
||||
TokenType: tj.TokenType,
|
||||
RefreshToken: tj.RefreshToken,
|
||||
Expiry: tj.expiry(),
|
||||
Raw: make(map[string]interface{}),
|
||||
}
|
||||
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
|
||||
}
|
||||
// Don't overwrite `RefreshToken` with an empty value
|
||||
// if this was a token refreshing request.
|
||||
if token.RefreshToken == "" {
|
||||
token.RefreshToken = v.Get("refresh_token")
|
||||
}
|
||||
return token, nil
|
||||
}
|
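The brokenAuthHeaderProviders list above decides whether RetrieveToken sends client credentials via HTTP Basic auth or as form values; additional providers can be registered through the public wrapper defined in oauth2.go below. A minimal sketch with a placeholder token URL:

// Minimal sketch; the endpoint URL stands in for a provider that rejects
// HTTP Basic authentication at its token endpoint.
package main

import "golang.org/x/oauth2"

func init() {
    // After this call, token requests to this URL prefix carry client_id and
    // client_secret as form values instead of a Basic Authorization header.
    oauth2.RegisterBrokenAuthHeaderProvider("https://auth.example.com/oauth/token")
}

func main() {}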
69
vendor/src/golang.org/x/oauth2/internal/transport.go
vendored
Normal file
|
@@ -0,0 +1,69 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
var HTTPClient ContextKey
|
||||
|
||||
// ContextKey is just an empty struct. It exists so HTTPClient can be
|
||||
// an immutable public variable with a unique type. It's immutable
|
||||
// because nobody else can create a ContextKey, being unexported.
|
||||
type ContextKey struct{}
|
||||
|
||||
// ContextClientFunc is a func which tries to return an *http.Client
|
||||
// given a Context value. If it returns an error, the search stops
|
||||
// with that error. If it returns (nil, nil), the search continues
|
||||
// down the list of registered funcs.
|
||||
type ContextClientFunc func(context.Context) (*http.Client, error)
|
||||
|
||||
var contextClientFuncs []ContextClientFunc
|
||||
|
||||
func RegisterContextClientFunc(fn ContextClientFunc) {
|
||||
contextClientFuncs = append(contextClientFuncs, fn)
|
||||
}
|
||||
|
||||
func ContextClient(ctx context.Context) (*http.Client, error) {
|
||||
if ctx != nil {
|
||||
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
|
||||
return hc, nil
|
||||
}
|
||||
}
|
||||
for _, fn := range contextClientFuncs {
|
||||
c, err := fn(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c != nil {
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
return http.DefaultClient, nil
|
||||
}
|
||||
|
||||
func ContextTransport(ctx context.Context) http.RoundTripper {
|
||||
hc, err := ContextClient(ctx)
|
||||
// This is a rare error case (somebody using nil on App Engine).
|
||||
if err != nil {
|
||||
return ErrorTransport{err}
|
||||
}
|
||||
return hc.Transport
|
||||
}
|
||||
|
||||
// ErrorTransport returns the specified error on RoundTrip.
|
||||
// This RoundTripper should be used in rare error cases where
|
||||
// error handling can be postponed to response handling time.
|
||||
type ErrorTransport struct{ Err error }
|
||||
|
||||
func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
|
||||
return nil, t.Err
|
||||
}
|
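A minimal sketch of how a caller substitutes its own *http.Client for the one ContextClient would otherwise pick, using the public oauth2.HTTPClient context key defined in oauth2.go below; the timeout value is only an example.

// Minimal sketch of overriding the HTTP client resolved from a context.
package main

import (
    "net/http"
    "time"

    "golang.org/x/net/context"
    "golang.org/x/oauth2"
)

func main() {
    // Any *http.Client works here; the short timeout is just an illustration.
    custom := &http.Client{Timeout: 10 * time.Second}
    ctx := context.WithValue(context.Background(), oauth2.HTTPClient, custom)

    // Token exchanges and oauth2.NewClient calls made with ctx now go through
    // `custom` instead of http.DefaultClient.
    _ = ctx
}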
172
vendor/src/golang.org/x/oauth2/jws/jws.go
vendored
Normal file
|
@@ -0,0 +1,172 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package jws provides encoding and decoding utilities for
|
||||
// signed JWS messages.
|
||||
package jws // import "golang.org/x/oauth2/jws"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ClaimSet contains information about the JWT signature including the
|
||||
// permissions being requested (scopes), the target of the token, the issuer,
|
||||
// the time the token was issued, and the lifetime of the token.
|
||||
type ClaimSet struct {
|
||||
Iss string `json:"iss"` // email address of the client_id of the application making the access token request
|
||||
Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
|
||||
Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
|
||||
Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
|
||||
Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
|
||||
Typ string `json:"typ,omitempty"` // token type (Optional).
|
||||
|
||||
// Email for which the application is requesting delegated access (Optional).
|
||||
Sub string `json:"sub,omitempty"`
|
||||
|
||||
// The old name of Sub. Client keeps setting Prn to be
|
||||
// compliant with legacy OAuth 2.0 providers. (Optional)
|
||||
Prn string `json:"prn,omitempty"`
|
||||
|
||||
// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
|
||||
// This array is marshalled using custom code (see (c *ClaimSet) encode()).
|
||||
PrivateClaims map[string]interface{} `json:"-"`
|
||||
}
|
||||
|
||||
func (c *ClaimSet) encode() (string, error) {
|
||||
// Reverting time back for machines whose time is not perfectly in sync.
|
||||
// If client machine's time is in the future according
|
||||
// to Google servers, an access token will not be issued.
|
||||
now := time.Now().Add(-10 * time.Second)
|
||||
if c.Iat == 0 {
|
||||
c.Iat = now.Unix()
|
||||
}
|
||||
if c.Exp == 0 {
|
||||
c.Exp = now.Add(time.Hour).Unix()
|
||||
}
|
||||
if c.Exp < c.Iat {
|
||||
return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
|
||||
}
|
||||
|
||||
b, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(c.PrivateClaims) == 0 {
|
||||
return base64Encode(b), nil
|
||||
}
|
||||
|
||||
// Marshal private claim set and then append it to b.
|
||||
prv, err := json.Marshal(c.PrivateClaims)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
|
||||
}
|
||||
|
||||
// Concatenate public and private claim JSON objects.
|
||||
if !bytes.HasSuffix(b, []byte{'}'}) {
|
||||
return "", fmt.Errorf("jws: invalid JSON %s", b)
|
||||
}
|
||||
if !bytes.HasPrefix(prv, []byte{'{'}) {
|
||||
return "", fmt.Errorf("jws: invalid JSON %s", prv)
|
||||
}
|
||||
b[len(b)-1] = ',' // Replace closing curly brace with a comma.
|
||||
b = append(b, prv[1:]...) // Append private claims.
|
||||
return base64Encode(b), nil
|
||||
}
|
||||
|
||||
// Header represents the header for the signed JWS payloads.
|
||||
type Header struct {
|
||||
// The algorithm used for signature.
|
||||
Algorithm string `json:"alg"`
|
||||
|
||||
// Represents the token type.
|
||||
Typ string `json:"typ"`
|
||||
}
|
||||
|
||||
func (h *Header) encode() (string, error) {
|
||||
b, err := json.Marshal(h)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64Encode(b), nil
|
||||
}
|
||||
|
||||
// Decode decodes a claim set from a JWS payload.
|
||||
func Decode(payload string) (*ClaimSet, error) {
|
||||
// decode returned id token to get expiry
|
||||
s := strings.Split(payload, ".")
|
||||
if len(s) < 2 {
|
||||
// TODO(jbd): Provide more context about the error.
|
||||
return nil, errors.New("jws: invalid token received")
|
||||
}
|
||||
decoded, err := base64Decode(s[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &ClaimSet{}
|
||||
err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
|
||||
return c, err
|
||||
}
|
||||
|
||||
// Signer returns a signature for the given data.
|
||||
type Signer func(data []byte) (sig []byte, err error)
|
||||
|
||||
// EncodeWithSigner encodes a header and claim set with the provided signer.
|
||||
func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
|
||||
head, err := header.encode()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
cs, err := c.encode()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ss := fmt.Sprintf("%s.%s", head, cs)
|
||||
sig, err := sg([]byte(ss))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil
|
||||
}
|
||||
|
||||
// Encode encodes a signed JWS with provided header and claim set.
|
||||
// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
|
||||
func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
|
||||
sg := func(data []byte) (sig []byte, err error) {
|
||||
h := sha256.New()
|
||||
h.Write([]byte(data))
|
||||
return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
|
||||
}
|
||||
return EncodeWithSigner(header, c, sg)
|
||||
}
|
||||
|
||||
// base64Encode returns a Base64url encoded version of the input string with any
|
||||
// trailing "=" stripped.
|
||||
func base64Encode(b []byte) string {
|
||||
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
|
||||
}
|
||||
|
||||
// base64Decode decodes the Base64url encoded string
|
||||
func base64Decode(s string) ([]byte, error) {
|
||||
// add back missing padding
|
||||
switch len(s) % 4 {
|
||||
case 1:
|
||||
s += "==="
|
||||
case 2:
|
||||
s += "=="
|
||||
case 3:
|
||||
s += "="
|
||||
}
|
||||
return base64.URLEncoding.DecodeString(s)
|
||||
}
|
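A minimal sketch of an Encode/Decode round trip with a throwaway RSA key; the claim values are placeholders, and note that Decode only parses the claim set, it does not verify the signature.

// Minimal sketch: sign and decode a claim set with a generated RSA key.
package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"
    "log"
    "time"

    "golang.org/x/oauth2/jws"
)

func main() {
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        log.Fatal(err)
    }
    claims := &jws.ClaimSet{
        Iss: "service-account@example.iam.gserviceaccount.com", // placeholder
        Aud: "https://accounts.google.com/o/oauth2/token",
        Exp: time.Now().Add(time.Hour).Unix(),
    }
    header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}

    // Encode produces header.claims.signature, base64url-encoded without padding.
    token, err := jws.Encode(header, claims, key)
    if err != nil {
        log.Fatal(err)
    }

    // Decode parses the claim-set segment only; no signature verification happens.
    got, err := jws.Decode(token)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(got.Iss, got.Exp)
}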
153
vendor/src/golang.org/x/oauth2/jwt/jwt.go
vendored
Normal file
|
@@ -0,0 +1,153 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
|
||||
// known as "two-legged OAuth 2.0".
|
||||
//
|
||||
// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/internal"
|
||||
"golang.org/x/oauth2/jws"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
|
||||
defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
|
||||
)
|
||||
|
||||
// Config is the configuration for using JWT to fetch tokens,
|
||||
// commonly known as "two-legged OAuth 2.0".
|
||||
type Config struct {
|
||||
// Email is the OAuth client identifier used when communicating with
|
||||
// the configured OAuth provider.
|
||||
Email string
|
||||
|
||||
// PrivateKey contains the contents of an RSA private key or the
|
||||
// contents of a PEM file that contains a private key. The provided
|
||||
// private key is used to sign JWT payloads.
|
||||
// PEM containers with a passphrase are not supported.
|
||||
// Use the following command to convert a PKCS 12 file into a PEM.
|
||||
//
|
||||
// $ openssl pkcs12 -in key.p12 -out key.pem -nodes
|
||||
//
|
||||
PrivateKey []byte
|
||||
|
||||
// Subject is the optional user to impersonate.
|
||||
Subject string
|
||||
|
||||
// Scopes optionally specifies a list of requested permission scopes.
|
||||
Scopes []string
|
||||
|
||||
// TokenURL is the endpoint required to complete the 2-legged JWT flow.
|
||||
TokenURL string
|
||||
|
||||
// Expires optionally specifies how long the token is valid for.
|
||||
Expires time.Duration
|
||||
}
|
||||
|
||||
// TokenSource returns a JWT TokenSource using the configuration
|
||||
// in c and the HTTP client from the provided context.
|
||||
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
|
||||
return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
|
||||
}
|
||||
|
||||
// Client returns an HTTP client wrapping the context's
|
||||
// HTTP transport and adding Authorization headers with tokens
|
||||
// obtained from c.
|
||||
//
|
||||
// The returned client and its Transport should not be modified.
|
||||
func (c *Config) Client(ctx context.Context) *http.Client {
|
||||
return oauth2.NewClient(ctx, c.TokenSource(ctx))
|
||||
}
|
||||
|
||||
// jwtSource is a source that always does a signed JWT request for a token.
|
||||
// It should typically be wrapped with a reuseTokenSource.
|
||||
type jwtSource struct {
|
||||
ctx context.Context
|
||||
conf *Config
|
||||
}
|
||||
|
||||
func (js jwtSource) Token() (*oauth2.Token, error) {
|
||||
pk, err := internal.ParseKey(js.conf.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hc := oauth2.NewClient(js.ctx, nil)
|
||||
claimSet := &jws.ClaimSet{
|
||||
Iss: js.conf.Email,
|
||||
Scope: strings.Join(js.conf.Scopes, " "),
|
||||
Aud: js.conf.TokenURL,
|
||||
}
|
||||
if subject := js.conf.Subject; subject != "" {
|
||||
claimSet.Sub = subject
|
||||
// prn is the old name of sub. Keep setting it
|
||||
// to be compatible with legacy OAuth 2.0 providers.
|
||||
claimSet.Prn = subject
|
||||
}
|
||||
if t := js.conf.Expires; t > 0 {
|
||||
claimSet.Exp = time.Now().Add(t).Unix()
|
||||
}
|
||||
payload, err := jws.Encode(defaultHeader, claimSet, pk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v := url.Values{}
|
||||
v.Set("grant_type", defaultGrantType)
|
||||
v.Set("assertion", payload)
|
||||
resp, err := hc.PostForm(js.conf.TokenURL, v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
|
||||
}
|
||||
if c := resp.StatusCode; c < 200 || c > 299 {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
|
||||
}
|
||||
// tokenRes is the JSON response body.
|
||||
var tokenRes struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
IDToken string `json:"id_token"`
|
||||
ExpiresIn int64 `json:"expires_in"` // relative seconds from now
|
||||
}
|
||||
if err := json.Unmarshal(body, &tokenRes); err != nil {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
|
||||
}
|
||||
token := &oauth2.Token{
|
||||
AccessToken: tokenRes.AccessToken,
|
||||
TokenType: tokenRes.TokenType,
|
||||
}
|
||||
raw := make(map[string]interface{})
|
||||
json.Unmarshal(body, &raw) // no error checks for optional fields
|
||||
token = token.WithExtra(raw)
|
||||
|
||||
if secs := tokenRes.ExpiresIn; secs > 0 {
|
||||
token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
|
||||
}
|
||||
if v := tokenRes.IDToken; v != "" {
|
||||
// decode returned id token to get expiry
|
||||
claimSet, err := jws.Decode(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
|
||||
}
|
||||
token.Expiry = time.Unix(claimSet.Exp, 0)
|
||||
}
|
||||
return token, nil
|
||||
}
|
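A minimal sketch of configuring the two-legged flow by hand; in practice google.JWTConfigFromJSON fills these fields from a key file, and the email, key bytes and scope below are placeholders.

// Minimal sketch of a hand-built jwt.Config; placeholder credentials only.
package main

import (
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    "golang.org/x/oauth2/jwt"
)

func main() {
    conf := &jwt.Config{
        Email:      "service-account@example.iam.gserviceaccount.com", // placeholder
        PrivateKey: []byte("-----BEGIN PRIVATE KEY-----\n...placeholder...\n-----END PRIVATE KEY-----\n"),
        Scopes:     []string{"https://www.googleapis.com/auth/devstorage.read_only"}, // placeholder scope
        TokenURL:   google.JWTTokenURL,
    }
    // Each token fetch signs a JWT assertion and POSTs it with
    // grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer.
    client := conf.Client(oauth2.NoContext)
    _ = client
}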
337
vendor/src/golang.org/x/oauth2/oauth2.go
vendored
Normal file
|
@@ -0,0 +1,337 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package oauth2 provides support for making
|
||||
// OAuth2 authorized and authenticated HTTP requests.
|
||||
// It can additionally grant authorization with Bearer JWT.
|
||||
package oauth2 // import "golang.org/x/oauth2"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// NoContext is the default context you should supply if not using
|
||||
// your own context.Context (see https://golang.org/x/net/context).
|
||||
var NoContext = context.TODO()
|
||||
|
||||
// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
|
||||
// identified by the tokenURL prefix as an OAuth2 implementation
|
||||
// which doesn't support the HTTP Basic authentication
|
||||
// scheme to authenticate with the authorization server.
|
||||
// Once a server is registered, credentials (client_id and client_secret)
|
||||
// will be passed as query parameters rather than being present
|
||||
// in the Authorization header.
|
||||
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
|
||||
func RegisterBrokenAuthHeaderProvider(tokenURL string) {
|
||||
internal.RegisterBrokenAuthHeaderProvider(tokenURL)
|
||||
}
|
||||
|
||||
// Config describes a typical 3-legged OAuth2 flow, with both the
|
||||
// client application information and the server's endpoint URLs.
|
||||
type Config struct {
|
||||
// ClientID is the application's ID.
|
||||
ClientID string
|
||||
|
||||
// ClientSecret is the application's secret.
|
||||
ClientSecret string
|
||||
|
||||
// Endpoint contains the resource server's token endpoint
|
||||
// URLs. These are constants specific to each server and are
|
||||
// often available via site-specific packages, such as
|
||||
// google.Endpoint or github.Endpoint.
|
||||
Endpoint Endpoint
|
||||
|
||||
// RedirectURL is the URL to redirect users going through
|
||||
// the OAuth flow, after the resource owner's URLs.
|
||||
RedirectURL string
|
||||
|
||||
// Scope specifies optional requested permissions.
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// A TokenSource is anything that can return a token.
|
||||
type TokenSource interface {
|
||||
// Token returns a token or an error.
|
||||
// Token must be safe for concurrent use by multiple goroutines.
|
||||
// The returned Token must not be modified.
|
||||
Token() (*Token, error)
|
||||
}
|
||||
|
||||
// Endpoint contains the OAuth 2.0 provider's authorization and token
|
||||
// endpoint URLs.
|
||||
type Endpoint struct {
|
||||
AuthURL string
|
||||
TokenURL string
|
||||
}
|
||||
|
||||
var (
|
||||
// AccessTypeOnline and AccessTypeOffline are options passed
|
||||
// to the Options.AuthCodeURL method. They modify the
|
||||
// "access_type" field that gets sent in the URL returned by
|
||||
// AuthCodeURL.
|
||||
//
|
||||
// Online is the default if neither is specified. If your
|
||||
// application needs to refresh access tokens when the user
|
||||
// is not present at the browser, then use offline. This will
|
||||
// result in your application obtaining a refresh token the
|
||||
// first time your application exchanges an authorization
|
||||
// code for a user.
|
||||
AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
|
||||
AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
|
||||
|
||||
// ApprovalForce forces the users to view the consent dialog
|
||||
// and confirm the permissions request at the URL returned
|
||||
// from AuthCodeURL, even if they've already done so.
|
||||
ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
|
||||
)
|
||||
|
||||
// An AuthCodeOption is passed to Config.AuthCodeURL.
|
||||
type AuthCodeOption interface {
|
||||
setValue(url.Values)
|
||||
}
|
||||
|
||||
type setParam struct{ k, v string }
|
||||
|
||||
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
|
||||
|
||||
// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
|
||||
// to a provider's authorization endpoint.
|
||||
func SetAuthURLParam(key, value string) AuthCodeOption {
|
||||
return setParam{key, value}
|
||||
}
|
||||
|
||||
// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
|
||||
// that asks for permissions for the required scopes explicitly.
|
||||
//
|
||||
// State is a token to protect the user from CSRF attacks. You must
|
||||
// always provide a non-zero string and validate that it matches the
|
||||
// state query parameter on your redirect callback.
|
||||
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
|
||||
//
|
||||
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
|
||||
// as ApprovalForce.
|
||||
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(c.Endpoint.AuthURL)
|
||||
v := url.Values{
|
||||
"response_type": {"code"},
|
||||
"client_id": {c.ClientID},
|
||||
"redirect_uri": internal.CondVal(c.RedirectURL),
|
||||
"scope": internal.CondVal(strings.Join(c.Scopes, " ")),
|
||||
"state": internal.CondVal(state),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.setValue(v)
|
||||
}
|
||||
if strings.Contains(c.Endpoint.AuthURL, "?") {
|
||||
buf.WriteByte('&')
|
||||
} else {
|
||||
buf.WriteByte('?')
|
||||
}
|
||||
buf.WriteString(v.Encode())
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// PasswordCredentialsToken converts a resource owner username and password
|
||||
// pair into a token.
|
||||
//
|
||||
// Per the RFC, this grant type should only be used "when there is a high
|
||||
// degree of trust between the resource owner and the client (e.g., the client
|
||||
// is part of the device operating system or a highly privileged application),
|
||||
// and when other authorization grant types are not available."
|
||||
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
|
||||
//
|
||||
// The HTTP client to use is derived from the context.
|
||||
// If nil, http.DefaultClient is used.
|
||||
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
|
||||
return retrieveToken(ctx, c, url.Values{
|
||||
"grant_type": {"password"},
|
||||
"username": {username},
|
||||
"password": {password},
|
||||
"scope": internal.CondVal(strings.Join(c.Scopes, " ")),
|
||||
})
|
||||
}
|
||||
|
||||
// Exchange converts an authorization code into a token.
|
||||
//
|
||||
// It is used after a resource provider redirects the user back
|
||||
// to the Redirect URI (the URL obtained from AuthCodeURL).
|
||||
//
|
||||
// The HTTP client to use is derived from the context.
|
||||
// If a client is not provided via the context, http.DefaultClient is used.
|
||||
//
|
||||
// The code will be in the *http.Request.FormValue("code"). Before
|
||||
// calling Exchange, be sure to validate FormValue("state").
|
||||
func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
|
||||
return retrieveToken(ctx, c, url.Values{
|
||||
"grant_type": {"authorization_code"},
|
||||
"code": {code},
|
||||
"redirect_uri": internal.CondVal(c.RedirectURL),
|
||||
"scope": internal.CondVal(strings.Join(c.Scopes, " ")),
|
||||
})
|
||||
}
|
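// AuthCodeURL and Exchange above are the core of the three-legged flow.
// A minimal, hedged sketch of a handler pair that uses them; the client ID,
// secret, endpoint URLs and redirect URL are placeholders, not values from
// this change.
package main

import (
    "fmt"
    "net/http"

    "golang.org/x/oauth2"
)

var conf = &oauth2.Config{
    ClientID:     "client-id",     // placeholder
    ClientSecret: "client-secret", // placeholder
    RedirectURL:  "https://example.com/callback",
    Scopes:       []string{"profile"},
    Endpoint: oauth2.Endpoint{ // provider-specific; placeholders here
        AuthURL:  "https://provider.example.com/oauth/authorize",
        TokenURL: "https://provider.example.com/oauth/token",
    },
}

func login(w http.ResponseWriter, r *http.Request) {
    // In real code the state value must be unpredictable and checked in callback.
    http.Redirect(w, r, conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline), http.StatusFound)
}

func callback(w http.ResponseWriter, r *http.Request) {
    // Validate r.FormValue("state") against the value issued in login first.
    tok, err := conf.Exchange(oauth2.NoContext, r.FormValue("code"))
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    fmt.Fprintln(w, "token expires:", tok.Expiry)
}

func main() {
    http.HandleFunc("/login", login)
    http.HandleFunc("/callback", callback)
    http.ListenAndServe(":8080", nil)
}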
||||
|
||||
// Client returns an HTTP client using the provided token.
|
||||
// The token will auto-refresh as necessary. The underlying
|
||||
// HTTP transport will be obtained using the provided context.
|
||||
// The returned client and its Transport should not be modified.
|
||||
func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
|
||||
return NewClient(ctx, c.TokenSource(ctx, t))
|
||||
}
|
||||
|
||||
// TokenSource returns a TokenSource that returns t until t expires,
|
||||
// automatically refreshing it as necessary using the provided context.
|
||||
//
|
||||
// Most users will use Config.Client instead.
|
||||
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
|
||||
tkr := &tokenRefresher{
|
||||
ctx: ctx,
|
||||
conf: c,
|
||||
}
|
||||
if t != nil {
|
||||
tkr.refreshToken = t.RefreshToken
|
||||
}
|
||||
return &reuseTokenSource{
|
||||
t: t,
|
||||
new: tkr,
|
||||
}
|
||||
}
|
||||
|
||||
// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
|
||||
// HTTP requests to renew a token using a RefreshToken.
|
||||
type tokenRefresher struct {
|
||||
ctx context.Context // used to get HTTP requests
|
||||
conf *Config
|
||||
refreshToken string
|
||||
}
|
||||
|
||||
// WARNING: Token is not safe for concurrent access, as it
|
||||
// updates the tokenRefresher's refreshToken field.
|
||||
// Within this package, it is used by reuseTokenSource which
|
||||
// synchronizes calls to this method with its own mutex.
|
||||
func (tf *tokenRefresher) Token() (*Token, error) {
|
||||
if tf.refreshToken == "" {
|
||||
return nil, errors.New("oauth2: token expired and refresh token is not set")
|
||||
}
|
||||
|
||||
tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
|
||||
"grant_type": {"refresh_token"},
|
||||
"refresh_token": {tf.refreshToken},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tf.refreshToken != tk.RefreshToken {
|
||||
tf.refreshToken = tk.RefreshToken
|
||||
}
|
||||
return tk, err
|
||||
}
|
||||
|
||||
// reuseTokenSource is a TokenSource that holds a single token in memory
|
||||
// and validates its expiry before each call to retrieve it with
|
||||
// Token. If it's expired, it will be auto-refreshed using the
|
||||
// new TokenSource.
|
||||
type reuseTokenSource struct {
|
||||
new TokenSource // called when t is expired.
|
||||
|
||||
mu sync.Mutex // guards t
|
||||
t *Token
|
||||
}
|
||||
|
||||
// Token returns the current token if it's still valid, else will
|
||||
// refresh the current token (using r.Context for HTTP client
|
||||
// information) and return the new one.
|
||||
func (s *reuseTokenSource) Token() (*Token, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.t.Valid() {
|
||||
return s.t, nil
|
||||
}
|
||||
t, err := s.new.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.t = t
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// StaticTokenSource returns a TokenSource that always returns the same token.
|
||||
// Because the provided token t is never refreshed, StaticTokenSource is only
|
||||
// useful for tokens that never expire.
|
||||
func StaticTokenSource(t *Token) TokenSource {
|
||||
return staticTokenSource{t}
|
||||
}
|
||||
|
||||
// staticTokenSource is a TokenSource that always returns the same Token.
|
||||
type staticTokenSource struct {
|
||||
t *Token
|
||||
}
|
||||
|
||||
func (s staticTokenSource) Token() (*Token, error) {
|
||||
return s.t, nil
|
||||
}
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
var HTTPClient internal.ContextKey
|
||||
|
||||
// NewClient creates an *http.Client from a Context and TokenSource.
|
||||
// The returned client is not valid beyond the lifetime of the context.
|
||||
//
|
||||
// As a special case, if src is nil, a non-OAuth2 client is returned
|
||||
// using the provided context. This exists to support related OAuth2
|
||||
// packages.
|
||||
func NewClient(ctx context.Context, src TokenSource) *http.Client {
|
||||
if src == nil {
|
||||
c, err := internal.ContextClient(ctx)
|
||||
if err != nil {
|
||||
return &http.Client{Transport: internal.ErrorTransport{err}}
|
||||
}
|
||||
return c
|
||||
}
|
||||
return &http.Client{
|
||||
Transport: &Transport{
|
||||
Base: internal.ContextTransport(ctx),
|
||||
Source: ReuseTokenSource(nil, src),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ReuseTokenSource returns a TokenSource which repeatedly returns the
|
||||
// same token as long as it's valid, starting with t.
|
||||
// When its cached token is invalid, a new token is obtained from src.
|
||||
//
|
||||
// ReuseTokenSource is typically used to reuse tokens from a cache
|
||||
// (such as a file on disk) between runs of a program, rather than
|
||||
// obtaining new tokens unnecessarily.
|
||||
//
|
||||
// The initial token t may be nil, in which case the TokenSource is
|
||||
// wrapped in a caching version if it isn't one already. This also
|
||||
// means it's always safe to wrap ReuseTokenSource around any other
|
||||
// TokenSource without adverse effects.
|
||||
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
|
||||
// Don't wrap a reuseTokenSource in itself. That would work,
|
||||
// but cause an unnecessary number of mutex operations.
|
||||
// Just build the equivalent one.
|
||||
if rt, ok := src.(*reuseTokenSource); ok {
|
||||
if t == nil {
|
||||
// Just use it directly.
|
||||
return rt
|
||||
}
|
||||
src = rt.new
|
||||
}
|
||||
return &reuseTokenSource{
|
||||
t: t,
|
||||
new: src,
|
||||
}
|
||||
}
|
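Editor's note: the sketch below is not part of the vendored file. It is a minimal illustration of the three-legged flow built from the Config methods above (AuthCodeURL, Exchange, Client); the client credentials, endpoint URLs, and authorization code are placeholders.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"scope1"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/o/oauth2/auth",
			TokenURL: "https://provider.example.com/o/oauth2/token",
		},
	}

	// Send the user to the consent page; "state" should be random in real code.
	fmt.Println(conf.AuthCodeURL("state"))

	// After the redirect, exchange the authorization code for a token and
	// build an auto-refreshing client.
	tok, err := conf.Exchange(context.Background(), "AUTH_CODE_FROM_CALLBACK")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background(), tok)
	_ = client // use client to call the protected API
}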
158 vendor/src/golang.org/x/oauth2/token.go vendored Normal file
@@ -0,0 +1,158 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// expiryDelta determines how much earlier a token should be considered
|
||||
// expired than its actual expiration time. It is used to avoid late
|
||||
// expirations due to client-server time mismatches.
|
||||
const expiryDelta = 10 * time.Second
|
||||
|
||||
// Token represents the credentials used to authorize
|
||||
// the requests to access protected resources on the OAuth 2.0
|
||||
// provider's backend.
|
||||
//
|
||||
// Most users of this package should not access fields of Token
|
||||
// directly. They're exported mostly for use by related packages
|
||||
// implementing derivative OAuth2 flows.
|
||||
type Token struct {
|
||||
// AccessToken is the token that authorizes and authenticates
|
||||
// the requests.
|
||||
AccessToken string `json:"access_token"`
|
||||
|
||||
// TokenType is the type of token.
|
||||
// The Type method returns either this or "Bearer", the default.
|
||||
TokenType string `json:"token_type,omitempty"`
|
||||
|
||||
// RefreshToken is a token that's used by the application
|
||||
// (as opposed to the user) to refresh the access token
|
||||
// if it expires.
|
||||
RefreshToken string `json:"refresh_token,omitempty"`
|
||||
|
||||
// Expiry is the optional expiration time of the access token.
|
||||
//
|
||||
// If zero, TokenSource implementations will reuse the same
|
||||
// token forever and RefreshToken or equivalent
|
||||
// mechanisms for that TokenSource will not be used.
|
||||
Expiry time.Time `json:"expiry,omitempty"`
|
||||
|
||||
// raw optionally contains extra metadata from the server
|
||||
// when updating a token.
|
||||
raw interface{}
|
||||
}
|
||||
|
||||
// Type returns t.TokenType if non-empty, else "Bearer".
|
||||
func (t *Token) Type() string {
|
||||
if strings.EqualFold(t.TokenType, "bearer") {
|
||||
return "Bearer"
|
||||
}
|
||||
if strings.EqualFold(t.TokenType, "mac") {
|
||||
return "MAC"
|
||||
}
|
||||
if strings.EqualFold(t.TokenType, "basic") {
|
||||
return "Basic"
|
||||
}
|
||||
if t.TokenType != "" {
|
||||
return t.TokenType
|
||||
}
|
||||
return "Bearer"
|
||||
}
|
||||
|
||||
// SetAuthHeader sets the Authorization header to r using the access
|
||||
// token in t.
|
||||
//
|
||||
// This method is unnecessary when using Transport or an HTTP Client
|
||||
// returned by this package.
|
||||
func (t *Token) SetAuthHeader(r *http.Request) {
|
||||
r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
|
||||
}
|
||||
|
||||
// WithExtra returns a new Token that's a clone of t, but using the
|
||||
// provided raw extra map. This is only intended for use by packages
|
||||
// implementing derivative OAuth2 flows.
|
||||
func (t *Token) WithExtra(extra interface{}) *Token {
|
||||
t2 := new(Token)
|
||||
*t2 = *t
|
||||
t2.raw = extra
|
||||
return t2
|
||||
}
|
||||
|
||||
// Extra returns an extra field.
|
||||
// Extra fields are key-value pairs returned by the server as a
|
||||
// part of the token retrieval response.
|
||||
func (t *Token) Extra(key string) interface{} {
|
||||
if raw, ok := t.raw.(map[string]interface{}); ok {
|
||||
return raw[key]
|
||||
}
|
||||
|
||||
vals, ok := t.raw.(url.Values)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
v := vals.Get(key)
|
||||
switch s := strings.TrimSpace(v); strings.Count(s, ".") {
|
||||
case 0: // Contains no "."; try to parse as int
|
||||
if i, err := strconv.ParseInt(s, 10, 64); err == nil {
|
||||
return i
|
||||
}
|
||||
case 1: // Contains a single "."; try to parse as float
|
||||
if f, err := strconv.ParseFloat(s, 64); err == nil {
|
||||
return f
|
||||
}
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// expired reports whether the token is expired.
|
||||
// t must be non-nil.
|
||||
func (t *Token) expired() bool {
|
||||
if t.Expiry.IsZero() {
|
||||
return false
|
||||
}
|
||||
return t.Expiry.Add(-expiryDelta).Before(time.Now())
|
||||
}
|
||||
|
||||
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
|
||||
func (t *Token) Valid() bool {
|
||||
return t != nil && t.AccessToken != "" && !t.expired()
|
||||
}
|
||||
|
||||
// tokenFromInternal maps an *internal.Token struct into
|
||||
// a *Token struct.
|
||||
func tokenFromInternal(t *internal.Token) *Token {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Token{
|
||||
AccessToken: t.AccessToken,
|
||||
TokenType: t.TokenType,
|
||||
RefreshToken: t.RefreshToken,
|
||||
Expiry: t.Expiry,
|
||||
raw: t.Raw,
|
||||
}
|
||||
}
|
||||
|
||||
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
|
||||
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
|
||||
// with an error.
|
||||
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
|
||||
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tokenFromInternal(tk), nil
|
||||
}
|
132 vendor/src/golang.org/x/oauth2/transport.go vendored Normal file
@@ -0,0 +1,132 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
|
||||
// wrapping a base RoundTripper and adding an Authorization header
|
||||
// with a token from the supplied Source.
|
||||
//
|
||||
// Transport is a low-level mechanism. Most code will use the
|
||||
// higher-level Config.Client method instead.
|
||||
type Transport struct {
|
||||
// Source supplies the token to add to outgoing requests'
|
||||
// Authorization headers.
|
||||
Source TokenSource
|
||||
|
||||
// Base is the base RoundTripper used to make HTTP requests.
|
||||
// If nil, http.DefaultTransport is used.
|
||||
Base http.RoundTripper
|
||||
|
||||
mu sync.Mutex // guards modReq
|
||||
modReq map[*http.Request]*http.Request // original -> modified
|
||||
}
|
||||
|
||||
// RoundTrip authorizes and authenticates the request with an
|
||||
// access token. If no token exists or token is expired,
|
||||
// tries to refresh/fetch a new token.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
if t.Source == nil {
|
||||
return nil, errors.New("oauth2: Transport's Source is nil")
|
||||
}
|
||||
token, err := t.Source.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req2 := cloneRequest(req) // per RoundTripper contract
|
||||
token.SetAuthHeader(req2)
|
||||
t.setModReq(req, req2)
|
||||
res, err := t.base().RoundTrip(req2)
|
||||
if err != nil {
|
||||
t.setModReq(req, nil)
|
||||
return nil, err
|
||||
}
|
||||
res.Body = &onEOFReader{
|
||||
rc: res.Body,
|
||||
fn: func() { t.setModReq(req, nil) },
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// CancelRequest cancels an in-flight request by closing its connection.
|
||||
func (t *Transport) CancelRequest(req *http.Request) {
|
||||
type canceler interface {
|
||||
CancelRequest(*http.Request)
|
||||
}
|
||||
if cr, ok := t.base().(canceler); ok {
|
||||
t.mu.Lock()
|
||||
modReq := t.modReq[req]
|
||||
delete(t.modReq, req)
|
||||
t.mu.Unlock()
|
||||
cr.CancelRequest(modReq)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transport) base() http.RoundTripper {
|
||||
if t.Base != nil {
|
||||
return t.Base
|
||||
}
|
||||
return http.DefaultTransport
|
||||
}
|
||||
|
||||
func (t *Transport) setModReq(orig, mod *http.Request) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.modReq == nil {
|
||||
t.modReq = make(map[*http.Request]*http.Request)
|
||||
}
|
||||
if mod == nil {
|
||||
delete(t.modReq, orig)
|
||||
} else {
|
||||
t.modReq[orig] = mod
|
||||
}
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header, len(r.Header))
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = append([]string(nil), s...)
|
||||
}
|
||||
return r2
|
||||
}
|
||||
|
||||
type onEOFReader struct {
|
||||
rc io.ReadCloser
|
||||
fn func()
|
||||
}
|
||||
|
||||
func (r *onEOFReader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.rc.Read(p)
|
||||
if err == io.EOF {
|
||||
r.runFunc()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *onEOFReader) Close() error {
|
||||
err := r.rc.Close()
|
||||
r.runFunc()
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *onEOFReader) runFunc() {
|
||||
if fn := r.fn; fn != nil {
|
||||
fn()
|
||||
r.fn = nil
|
||||
}
|
||||
}
|
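Editor's note: a hedged sketch, not part of the vendored file, showing how Transport is wired to a TokenSource by hand; StaticTokenSource comes from oauth2.go above and the access token is a placeholder.

package main

import (
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "ACCESS_TOKEN"}) // placeholder
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: src,                   // supplies the Authorization header
			Base:   http.DefaultTransport, // optional; nil also falls back to this
		},
	}
	_, _ = client.Get("https://www.example.com/protected")
}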
27 vendor/src/google.golang.org/api/LICENSE vendored Normal file
@@ -0,0 +1,27 @@
|
|||
Copyright (c) 2011 Google Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
177 vendor/src/google.golang.org/api/gensupport/json.go vendored Normal file
@@ -0,0 +1,177 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gensupport is an internal implementation detail used by code
|
||||
// generated by the google-api-go-generator tool.
|
||||
//
|
||||
// This package may be modified at any time without regard for backwards
|
||||
// compatibility. It should not be used directly by API users.
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MarshalJSON returns a JSON encoding of schema containing only selected fields.
|
||||
// A field is selected if:
|
||||
// * it has a non-empty value, or
|
||||
// * its field name is present in forceSendFields, and
|
||||
// * it is not a nil pointer or nil interface.
|
||||
// The JSON key for each selected field is taken from the field's json: struct tag.
|
||||
func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) {
|
||||
if len(forceSendFields) == 0 {
|
||||
return json.Marshal(schema)
|
||||
}
|
||||
|
||||
mustInclude := make(map[string]struct{})
|
||||
for _, f := range forceSendFields {
|
||||
mustInclude[f] = struct{}{}
|
||||
}
|
||||
|
||||
dataMap, err := schemaToMap(schema, mustInclude)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(dataMap)
|
||||
}
|
||||
|
||||
func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) {
|
||||
m := make(map[string]interface{})
|
||||
s := reflect.ValueOf(schema)
|
||||
st := s.Type()
|
||||
|
||||
for i := 0; i < s.NumField(); i++ {
|
||||
jsonTag := st.Field(i).Tag.Get("json")
|
||||
if jsonTag == "" {
|
||||
continue
|
||||
}
|
||||
tag, err := parseJSONTag(jsonTag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tag.ignore {
|
||||
continue
|
||||
}
|
||||
|
||||
v := s.Field(i)
|
||||
f := st.Field(i)
|
||||
if !includeField(v, f, mustInclude) {
|
||||
continue
|
||||
}
|
||||
|
||||
// nil maps are treated as empty maps.
|
||||
if f.Type.Kind() == reflect.Map && v.IsNil() {
|
||||
m[tag.apiName] = map[string]string{}
|
||||
continue
|
||||
}
|
||||
|
||||
// nil slices are treated as empty slices.
|
||||
if f.Type.Kind() == reflect.Slice && v.IsNil() {
|
||||
m[tag.apiName] = []bool{}
|
||||
continue
|
||||
}
|
||||
|
||||
if tag.stringFormat {
|
||||
m[tag.apiName] = formatAsString(v, f.Type.Kind())
|
||||
} else {
|
||||
m[tag.apiName] = v.Interface()
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// formatAsString returns a string representation of v, dereferencing it first if possible.
|
||||
func formatAsString(v reflect.Value, kind reflect.Kind) string {
|
||||
if kind == reflect.Ptr && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v", v.Interface())
|
||||
}
|
||||
|
||||
// jsonTag represents a restricted version of the struct tag format used by encoding/json.
|
||||
// It is used to describe the JSON encoding of fields in a Schema struct.
|
||||
type jsonTag struct {
|
||||
apiName string
|
||||
stringFormat bool
|
||||
ignore bool
|
||||
}
|
||||
|
||||
// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
|
||||
// The format of the tag must match that generated by the Schema.writeSchemaStruct method
|
||||
// in the api generator.
|
||||
func parseJSONTag(val string) (jsonTag, error) {
|
||||
if val == "-" {
|
||||
return jsonTag{ignore: true}, nil
|
||||
}
|
||||
|
||||
var tag jsonTag
|
||||
|
||||
i := strings.Index(val, ",")
|
||||
if i == -1 || val[:i] == "" {
|
||||
return tag, fmt.Errorf("malformed json tag: %s", val)
|
||||
}
|
||||
|
||||
tag = jsonTag{
|
||||
apiName: val[:i],
|
||||
}
|
||||
|
||||
switch val[i+1:] {
|
||||
case "omitempty":
|
||||
case "omitempty,string":
|
||||
tag.stringFormat = true
|
||||
default:
|
||||
return tag, fmt.Errorf("malformed json tag: %s", val)
|
||||
}
|
||||
|
||||
return tag, nil
|
||||
}
|
||||
|
||||
// Reports whether the struct field "f" with value "v" should be included in JSON output.
|
||||
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
|
||||
// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
|
||||
// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
|
||||
// However, many fields are not pointers, so there would be no way to delete these fields.
|
||||
// Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
|
||||
// Deletion will be handled by a separate mechanism.
|
||||
if f.Type.Kind() == reflect.Ptr && v.IsNil() {
|
||||
return false
|
||||
}
|
||||
|
||||
// The "any" type is represented as an interface{}. If this interface
|
||||
// is nil, there is no reasonable representation to send. We ignore
|
||||
// these fields, for the same reasons as given above for pointers.
|
||||
if f.Type.Kind() == reflect.Interface && v.IsNil() {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok := mustInclude[f.Name]
|
||||
return ok || !isEmptyValue(v)
|
||||
}
|
||||
|
||||
// isEmptyValue reports whether v is the empty value for its type. This
|
||||
// implementation is based on that of the encoding/json package, but its
|
||||
// correctness does not depend on it being identical. What's important is that
|
||||
// this function return false in situations where v should not be sent as part
|
||||
// of a PATCH operation.
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
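Editor's note: an illustrative sketch, not part of the vendored file. It shows the forceSendFields behaviour of gensupport.MarshalJSON defined above; the Entry struct is a hypothetical generator-style schema.

package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

// Entry is a hypothetical schema struct in the style emitted by the generator.
type Entry struct {
	Name  string `json:"name,omitempty"`
	Count int64  `json:"count,omitempty"`
}

func main() {
	e := Entry{Name: "example", Count: 0}

	b, _ := gensupport.MarshalJSON(e, nil)
	fmt.Println(string(b)) // {"name":"example"}; the zero Count is dropped

	b, _ = gensupport.MarshalJSON(e, []string{"Count"})
	fmt.Println(string(b)) // {"count":0,"name":"example"}; Count is forced into the output
}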
31 vendor/src/google.golang.org/api/gensupport/params.go vendored Normal file
@@ -0,0 +1,31 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import "net/url"
|
||||
|
||||
// URLParams is a simplified replacement for url.Values
|
||||
// that safely builds up URL parameters for encoding.
|
||||
type URLParams map[string][]string
|
||||
|
||||
// Set sets the key to value.
|
||||
// It replaces any existing values.
|
||||
func (u URLParams) Set(key, value string) {
|
||||
u[key] = []string{value}
|
||||
}
|
||||
|
||||
// SetMulti sets the key to an array of values.
|
||||
// It replaces any existing values.
|
||||
// Note that values must not be modified after calling SetMulti
|
||||
// so the caller is responsible for making a copy if necessary.
|
||||
func (u URLParams) SetMulti(key string, values []string) {
|
||||
u[key] = values
|
||||
}
|
||||
|
||||
// Encode encodes the values into ``URL encoded'' form
|
||||
// ("bar=baz&foo=quux") sorted by key.
|
||||
func (u URLParams) Encode() string {
|
||||
return url.Values(u).Encode()
|
||||
}
|
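Editor's note: a small sketch, not part of the vendored file, of URLParams from params.go above; the parameter names are arbitrary.

package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

func main() {
	params := make(gensupport.URLParams)
	params.Set("pageSize", "100")
	params.SetMulti("fields", []string{"entries", "nextPageToken"})
	fmt.Println(params.Encode()) // fields=entries&fields=nextPageToken&pageSize=100
}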
588 vendor/src/google.golang.org/api/googleapi/googleapi.go vendored Normal file
@@ -0,0 +1,588 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package googleapi contains the common code shared by all Google API
|
||||
// libraries.
|
||||
package googleapi // import "google.golang.org/api/googleapi"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
"google.golang.org/api/googleapi/internal/uritemplates"
|
||||
)
|
||||
|
||||
// ContentTyper is an interface for Readers which know (or would like
|
||||
// to override) their Content-Type. If a media body doesn't implement
|
||||
// ContentTyper, the type is sniffed from the content using
|
||||
// http.DetectContentType.
|
||||
type ContentTyper interface {
|
||||
ContentType() string
|
||||
}
|
||||
|
||||
// A SizeReaderAt is a ReaderAt with a Size method.
|
||||
// An io.SectionReader implements SizeReaderAt.
|
||||
type SizeReaderAt interface {
|
||||
io.ReaderAt
|
||||
Size() int64
|
||||
}
|
||||
|
||||
// ServerResponse is embedded in each Do response and
|
||||
// provides the HTTP status code and header sent by the server.
|
||||
type ServerResponse struct {
|
||||
// HTTPStatusCode is the server's response status code.
|
||||
// When using a resource method's Do call, this will always be in the 2xx range.
|
||||
HTTPStatusCode int
|
||||
// Header contains the response header fields from the server.
|
||||
Header http.Header
|
||||
}
|
||||
|
||||
const (
|
||||
Version = "0.5"
|
||||
|
||||
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
|
||||
statusResumeIncomplete = 308
|
||||
|
||||
// UserAgent is the header string used to identify this package.
|
||||
UserAgent = "google-api-go-client/" + Version
|
||||
|
||||
// uploadPause determines the delay between failed upload attempts
|
||||
uploadPause = 1 * time.Second
|
||||
)
|
||||
|
||||
// Error contains an error response from the server.
|
||||
type Error struct {
|
||||
// Code is the HTTP response status code and will always be populated.
|
||||
Code int `json:"code"`
|
||||
// Message is the server response message and is only populated when
|
||||
// explicitly referenced by the JSON server response.
|
||||
Message string `json:"message"`
|
||||
// Body is the raw response returned by the server.
|
||||
// It is often but not always JSON, depending on how the request fails.
|
||||
Body string
|
||||
// Header contains the response header fields from the server.
|
||||
Header http.Header
|
||||
|
||||
Errors []ErrorItem
|
||||
}
|
||||
|
||||
// ErrorItem is a detailed error code & message from the Google API frontend.
|
||||
type ErrorItem struct {
|
||||
// Reason is the typed error code. For example: "some_example".
|
||||
Reason string `json:"reason"`
|
||||
// Message is the human-readable description of the error.
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
if len(e.Errors) == 0 && e.Message == "" {
|
||||
return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
|
||||
if e.Message != "" {
|
||||
fmt.Fprintf(&buf, "%s", e.Message)
|
||||
}
|
||||
if len(e.Errors) == 0 {
|
||||
return strings.TrimSpace(buf.String())
|
||||
}
|
||||
if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
|
||||
fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
|
||||
return buf.String()
|
||||
}
|
||||
fmt.Fprintln(&buf, "\nMore details:")
|
||||
for _, v := range e.Errors {
|
||||
fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type errorReply struct {
|
||||
Error *Error `json:"error"`
|
||||
}
|
||||
|
||||
// CheckResponse returns an error (of type *Error) if the response
|
||||
// status code is not 2xx.
|
||||
func CheckResponse(res *http.Response) error {
|
||||
if res.StatusCode >= 200 && res.StatusCode <= 299 {
|
||||
return nil
|
||||
}
|
||||
slurp, err := ioutil.ReadAll(res.Body)
|
||||
if err == nil {
|
||||
jerr := new(errorReply)
|
||||
err = json.Unmarshal(slurp, jerr)
|
||||
if err == nil && jerr.Error != nil {
|
||||
if jerr.Error.Code == 0 {
|
||||
jerr.Error.Code = res.StatusCode
|
||||
}
|
||||
jerr.Error.Body = string(slurp)
|
||||
return jerr.Error
|
||||
}
|
||||
}
|
||||
return &Error{
|
||||
Code: res.StatusCode,
|
||||
Body: string(slurp),
|
||||
Header: res.Header,
|
||||
}
|
||||
}
|
||||
|
||||
// IsNotModified reports whether err is the result of the
|
||||
// server replying with http.StatusNotModified.
|
||||
// Such error values are sometimes returned by "Do" methods
|
||||
// on calls when If-None-Match is used.
|
||||
func IsNotModified(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
ae, ok := err.(*Error)
|
||||
return ok && ae.Code == http.StatusNotModified
|
||||
}
|
||||
|
||||
// CheckMediaResponse returns an error (of type *Error) if the response
|
||||
// status code is not 2xx. Unlike CheckResponse it does not assume the
|
||||
// body is a JSON error document.
|
||||
func CheckMediaResponse(res *http.Response) error {
|
||||
if res.StatusCode >= 200 && res.StatusCode <= 299 {
|
||||
return nil
|
||||
}
|
||||
slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
|
||||
res.Body.Close()
|
||||
return &Error{
|
||||
Code: res.StatusCode,
|
||||
Body: string(slurp),
|
||||
}
|
||||
}
|
||||
|
||||
type MarshalStyle bool
|
||||
|
||||
var WithDataWrapper = MarshalStyle(true)
|
||||
var WithoutDataWrapper = MarshalStyle(false)
|
||||
|
||||
func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if wrap {
|
||||
buf.Write([]byte(`{"data": `))
|
||||
}
|
||||
err := json.NewEncoder(buf).Encode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if wrap {
|
||||
buf.Write([]byte(`}`))
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func getMediaType(media io.Reader) (io.Reader, string) {
|
||||
if typer, ok := media.(ContentTyper); ok {
|
||||
return media, typer.ContentType()
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
typ := "application/octet-stream"
|
||||
buf, err := ioutil.ReadAll(io.LimitReader(media, 512))
|
||||
if err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
|
||||
return pr, typ
|
||||
}
|
||||
typ = http.DetectContentType(buf)
|
||||
mr := io.MultiReader(bytes.NewReader(buf), media)
|
||||
go func() {
|
||||
_, err = io.Copy(pw, mr)
|
||||
if err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
|
||||
return
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return pr, typ
|
||||
}
|
||||
|
||||
// DetectMediaType detects and returns the content type of the provided media.
|
||||
// If the type can not be determined, "application/octet-stream" is returned.
|
||||
func DetectMediaType(media io.ReaderAt) string {
|
||||
if typer, ok := media.(ContentTyper); ok {
|
||||
return typer.ContentType()
|
||||
}
|
||||
|
||||
typ := "application/octet-stream"
|
||||
buf := make([]byte, 1024)
|
||||
n, err := media.ReadAt(buf, 0)
|
||||
buf = buf[:n]
|
||||
if err == nil || err == io.EOF {
|
||||
typ = http.DetectContentType(buf)
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
type Lengther interface {
|
||||
Len() int
|
||||
}
|
||||
|
||||
// endingWithErrorReader reads from r until it returns an error. If the
|
||||
// final error from r is io.EOF and e is non-nil, e is used instead.
|
||||
type endingWithErrorReader struct {
|
||||
r io.Reader
|
||||
e error
|
||||
}
|
||||
|
||||
func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
|
||||
n, err = er.r.Read(p)
|
||||
if err == io.EOF && er.e != nil {
|
||||
err = er.e
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func typeHeader(contentType string) textproto.MIMEHeader {
|
||||
h := make(textproto.MIMEHeader)
|
||||
h.Set("Content-Type", contentType)
|
||||
return h
|
||||
}
|
||||
|
||||
// countingWriter counts the number of bytes it receives to write, but
|
||||
// discards them.
|
||||
type countingWriter struct {
|
||||
n *int64
|
||||
}
|
||||
|
||||
func (w countingWriter) Write(p []byte) (int, error) {
|
||||
*w.n += int64(len(p))
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// ConditionallyIncludeMedia does nothing if media is nil.
|
||||
//
|
||||
// bodyp is an in/out parameter. It should initially point to the
|
||||
// reader of the application/json (or whatever) payload to send in the
|
||||
// API request. It's updated to point to the multipart body reader.
|
||||
//
|
||||
// ctypep is an in/out parameter. It should initially point to the
|
||||
// content type of the bodyp, usually "application/json". It's updated
|
||||
// to the "multipart/related" content type, with random boundary.
|
||||
//
|
||||
// The return values are a cancel function for aborting the multipart copy and whether media was included.
|
||||
func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) {
|
||||
if media == nil {
|
||||
return
|
||||
}
|
||||
// Get the media type, which might return a different reader instance.
|
||||
var mediaType string
|
||||
media, mediaType = getMediaType(media)
|
||||
|
||||
body, bodyType := *bodyp, *ctypep
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
mpw := multipart.NewWriter(pw)
|
||||
*bodyp = pr
|
||||
*ctypep = "multipart/related; boundary=" + mpw.Boundary()
|
||||
go func() {
|
||||
w, err := mpw.CreatePart(typeHeader(bodyType))
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err))
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(w, body)
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
w, err = mpw.CreatePart(typeHeader(mediaType))
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err))
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(w, media)
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err))
|
||||
return
|
||||
}
|
||||
mpw.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
cancel = func() { pw.CloseWithError(errAborted) }
|
||||
return cancel, true
|
||||
}
|
||||
|
||||
var errAborted = errors.New("googleapi: upload aborted")
|
||||
|
||||
// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
|
||||
// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
|
||||
// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
|
||||
type ProgressUpdater func(current, total int64)
|
||||
|
||||
// ResumableUpload is used by the generated APIs to provide resumable uploads.
|
||||
// It is not used by developers directly.
|
||||
type ResumableUpload struct {
|
||||
Client *http.Client
|
||||
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
|
||||
URI string
|
||||
UserAgent string // User-Agent for header of the request
|
||||
// Media is the object being uploaded.
|
||||
Media io.ReaderAt
|
||||
// MediaType defines the media type, e.g. "image/jpeg".
|
||||
MediaType string
|
||||
// ContentLength is the full size of the object being uploaded.
|
||||
ContentLength int64
|
||||
|
||||
mu sync.Mutex // guards progress
|
||||
progress int64 // number of bytes uploaded so far
|
||||
|
||||
// Callback is an optional function that will be called upon every progress update.
|
||||
Callback ProgressUpdater
|
||||
}
|
||||
|
||||
var (
|
||||
// rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded.
|
||||
rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`)
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
chunkSize int64 = 1 << 18
|
||||
)
|
||||
|
||||
// Progress returns the number of bytes uploaded at this point.
|
||||
func (rx *ResumableUpload) Progress() int64 {
|
||||
rx.mu.Lock()
|
||||
defer rx.mu.Unlock()
|
||||
return rx.progress
|
||||
}
|
||||
|
||||
func (rx *ResumableUpload) transferStatus(ctx context.Context) (int64, *http.Response, error) {
|
||||
req, _ := http.NewRequest("POST", rx.URI, nil)
|
||||
req.ContentLength = 0
|
||||
req.Header.Set("User-Agent", rx.UserAgent)
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
|
||||
res, err := ctxhttp.Do(ctx, rx.Client, req)
|
||||
if err != nil || res.StatusCode != statusResumeIncomplete {
|
||||
return 0, res, err
|
||||
}
|
||||
var start int64
|
||||
if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 {
|
||||
start, err = strconv.ParseInt(m[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("unable to parse range size %v", m[1])
|
||||
}
|
||||
start += 1 // Start at the next byte
|
||||
}
|
||||
return start, res, nil
|
||||
}
|
||||
|
||||
type chunk struct {
|
||||
body io.Reader
|
||||
size int64
|
||||
err error
|
||||
}
|
||||
|
||||
func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {
|
||||
start, res, err := rx.transferStatus(ctx)
|
||||
if err != nil || res.StatusCode != statusResumeIncomplete {
|
||||
if err == context.Canceled {
|
||||
return &http.Response{StatusCode: http.StatusRequestTimeout}, err
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
for {
|
||||
select { // Check for cancellation
|
||||
case <-ctx.Done():
|
||||
res.StatusCode = http.StatusRequestTimeout
|
||||
return res, ctx.Err()
|
||||
default:
|
||||
}
|
||||
reqSize := rx.ContentLength - start
|
||||
if reqSize > chunkSize {
|
||||
reqSize = chunkSize
|
||||
}
|
||||
r := io.NewSectionReader(rx.Media, start, reqSize)
|
||||
req, _ := http.NewRequest("POST", rx.URI, r)
|
||||
req.ContentLength = reqSize
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
|
||||
req.Header.Set("Content-Type", rx.MediaType)
|
||||
req.Header.Set("User-Agent", rx.UserAgent)
|
||||
res, err = ctxhttp.Do(ctx, rx.Client, req)
|
||||
start += reqSize
|
||||
if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) {
|
||||
rx.mu.Lock()
|
||||
rx.progress = start // keep track of number of bytes sent so far
|
||||
rx.mu.Unlock()
|
||||
if rx.Callback != nil {
|
||||
rx.Callback(start, rx.ContentLength)
|
||||
}
|
||||
}
|
||||
if err != nil || res.StatusCode != statusResumeIncomplete {
|
||||
break
|
||||
}
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
var sleep = time.Sleep // override in unit tests
|
||||
|
||||
// Upload starts the process of a resumable upload with a cancellable context.
|
||||
// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.
|
||||
// It is called from the auto-generated API code and is not visible to the user.
|
||||
// rx is private to the auto-generated API code.
|
||||
func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
|
||||
var res *http.Response
|
||||
var err error
|
||||
for {
|
||||
res, err = rx.transferChunks(ctx)
|
||||
if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
|
||||
return res, err
|
||||
}
|
||||
select { // Check for cancellation
|
||||
case <-ctx.Done():
|
||||
res.StatusCode = http.StatusRequestTimeout
|
||||
return res, ctx.Err()
|
||||
default:
|
||||
}
|
||||
sleep(uploadPause)
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
func ResolveRelative(basestr, relstr string) string {
|
||||
u, _ := url.Parse(basestr)
|
||||
rel, _ := url.Parse(relstr)
|
||||
u = u.ResolveReference(rel)
|
||||
us := u.String()
|
||||
us = strings.Replace(us, "%7B", "{", -1)
|
||||
us = strings.Replace(us, "%7D", "}", -1)
|
||||
return us
|
||||
}
|
||||
|
||||
// has4860Fix is whether this Go environment contains the fix for
|
||||
// http://golang.org/issue/4860
|
||||
var has4860Fix bool
|
||||
|
||||
// init initializes has4860Fix by checking the behavior of the net/http package.
|
||||
func init() {
|
||||
r := http.Request{
|
||||
URL: &url.URL{
|
||||
Scheme: "http",
|
||||
Opaque: "//opaque",
|
||||
},
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
r.Write(b)
|
||||
has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
|
||||
}
|
||||
|
||||
// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
|
||||
// don't alter any hex-escaped characters in u.Path.
|
||||
func SetOpaque(u *url.URL) {
|
||||
u.Opaque = "//" + u.Host + u.Path
|
||||
if !has4860Fix {
|
||||
u.Opaque = u.Scheme + ":" + u.Opaque
|
||||
}
|
||||
}
|
||||
|
||||
// Expand substitutes any {encoded} strings in the URL passed in using
|
||||
// the map supplied.
|
||||
//
|
||||
// This calls SetOpaque to avoid encoding of the parameters in the URL path.
|
||||
func Expand(u *url.URL, expansions map[string]string) {
|
||||
expanded, err := uritemplates.Expand(u.Path, expansions)
|
||||
if err == nil {
|
||||
u.Path = expanded
|
||||
SetOpaque(u)
|
||||
}
|
||||
}
|
||||
|
||||
// CloseBody is used to close res.Body.
|
||||
// Prior to calling Close, it also tries to Read a small amount to see an EOF.
|
||||
// Not seeing an EOF can prevent HTTP Transports from reusing connections.
|
||||
func CloseBody(res *http.Response) {
|
||||
if res == nil || res.Body == nil {
|
||||
return
|
||||
}
|
||||
// Justification for 3 byte reads: two for up to "\r\n" after
|
||||
// a JSON/XML document, and then 1 to see EOF if we haven't yet.
|
||||
// TODO(bradfitz): detect Go 1.3+ and skip these reads.
|
||||
// See https://codereview.appspot.com/58240043
|
||||
// and https://codereview.appspot.com/49570044
|
||||
buf := make([]byte, 1)
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := res.Body.Read(buf)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
}
|
||||
|
||||
// VariantType returns the type name of the given variant.
|
||||
// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned.
|
||||
// This is used to support "variant" APIs that can return one of a number of different types.
|
||||
func VariantType(t map[string]interface{}) string {
|
||||
s, _ := t["type"].(string)
|
||||
return s
|
||||
}
|
||||
|
||||
// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
|
||||
// This is used to support "variant" APIs that can return one of a number of different types.
|
||||
// It reports whether the conversion was successful.
|
||||
func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
|
||||
var buf bytes.Buffer
|
||||
err := json.NewEncoder(&buf).Encode(v)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return json.Unmarshal(buf.Bytes(), dst) == nil
|
||||
}
|
||||
|
||||
// A Field names a field to be retrieved with a partial response.
|
||||
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
|
||||
//
|
||||
// Partial responses can dramatically reduce the amount of data that must be sent to your application.
|
||||
// In order to request partial responses, you can specify the full list of fields
|
||||
// that your application needs by adding the Fields option to your request.
|
||||
//
|
||||
// Field strings use camelCase with leading lower-case characters to identify fields within the response.
|
||||
//
|
||||
// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
|
||||
// you could request just those fields like this:
|
||||
//
|
||||
// svc.Events.List().Fields("nextPageToken", "items/id").Do()
|
||||
//
|
||||
// or if you were also interested in each Item's "Updated" field, you can combine them like this:
|
||||
//
|
||||
// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
|
||||
//
|
||||
// More information about field formatting can be found here:
|
||||
// https://developers.google.com/+/api/#fields-syntax
|
||||
//
|
||||
// Another way to find field names is through the Google API explorer:
|
||||
// https://developers.google.com/apis-explorer/#p/
|
||||
type Field string
|
||||
|
||||
// CombineFields combines fields into a single string.
|
||||
func CombineFields(s []Field) string {
|
||||
r := make([]string, len(s))
|
||||
for i, v := range s {
|
||||
r[i] = string(v)
|
||||
}
|
||||
return strings.Join(r, ",")
|
||||
}
|
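Editor's note: an illustrative sketch, not part of the vendored file. It shows typical use of CheckResponse, CloseBody, and the *Error type defined above; the request URL is a placeholder.

package main

import (
	"fmt"
	"log"
	"net/http"

	"google.golang.org/api/googleapi"
)

func main() {
	res, err := http.Get("https://www.googleapis.com/placeholder/v1/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer googleapi.CloseBody(res)

	if err := googleapi.CheckResponse(res); err != nil {
		if gerr, ok := err.(*googleapi.Error); ok {
			fmt.Printf("API error %d: %s\n", gerr.Code, gerr.Message)
			return
		}
		log.Fatal(err)
	}
	// ... decode res.Body on success
}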
18 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE vendored Normal file
@@ -0,0 +1,18 @@
|
|||
Copyright (c) 2013 Joshua Tacoma
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
359 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go vendored Normal file
@@ -0,0 +1,359 @@
|
|||
// Copyright 2013 Joshua Tacoma. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package uritemplates is a level 4 implementation of RFC 6570 (URI
|
||||
// Template, http://tools.ietf.org/html/rfc6570).
|
||||
//
|
||||
// To use uritemplates, parse a template string and expand it with a value
|
||||
// map:
|
||||
//
|
||||
// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
|
||||
// values := make(map[string]interface{})
|
||||
// values["user"] = "jtacoma"
|
||||
// values["repo"] = "uritemplates"
|
||||
// expanded, _ := template.ExpandString(values)
|
||||
// fmt.Printf(expanded)
|
||||
//
|
||||
package uritemplates
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
|
||||
reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
|
||||
validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
)
|
||||
|
||||
func pctEncode(src []byte) []byte {
|
||||
dst := make([]byte, len(src)*3)
|
||||
for i, b := range src {
|
||||
buf := dst[i*3 : i*3+3]
|
||||
buf[0] = 0x25
|
||||
buf[1] = hex[b/16]
|
||||
buf[2] = hex[b%16]
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func escape(s string, allowReserved bool) (escaped string) {
|
||||
if allowReserved {
|
||||
escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
} else {
|
||||
escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
}
|
||||
return escaped
|
||||
}
|
||||
|
||||
// A UriTemplate is a parsed representation of a URI template.
|
||||
type UriTemplate struct {
|
||||
raw string
|
||||
parts []templatePart
|
||||
}
|
||||
|
||||
// Parse parses a URI template string into a UriTemplate object.
|
||||
func Parse(rawtemplate string) (template *UriTemplate, err error) {
|
||||
template = new(UriTemplate)
|
||||
template.raw = rawtemplate
|
||||
split := strings.Split(rawtemplate, "{")
|
||||
template.parts = make([]templatePart, len(split)*2-1)
|
||||
for i, s := range split {
|
||||
if i == 0 {
|
||||
if strings.Contains(s, "}") {
|
||||
err = errors.New("unexpected }")
|
||||
break
|
||||
}
|
||||
template.parts[i].raw = s
|
||||
} else {
|
||||
subsplit := strings.Split(s, "}")
|
||||
if len(subsplit) != 2 {
|
||||
err = errors.New("malformed template")
|
||||
break
|
||||
}
|
||||
expression := subsplit[0]
|
||||
template.parts[i*2-1], err = parseExpression(expression)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
template.parts[i*2].raw = subsplit[1]
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
template = nil
|
||||
}
|
||||
return template, err
|
||||
}
|
||||
|
||||
type templatePart struct {
|
||||
raw string
|
||||
terms []templateTerm
|
||||
first string
|
||||
sep string
|
||||
named bool
|
||||
ifemp string
|
||||
allowReserved bool
|
||||
}
|
||||
|
||||
type templateTerm struct {
|
||||
name string
|
||||
explode bool
|
||||
truncate int
|
||||
}
|
||||
|
||||
func parseExpression(expression string) (result templatePart, err error) {
|
||||
switch expression[0] {
|
||||
case '+':
|
||||
result.sep = ","
|
||||
result.allowReserved = true
|
||||
expression = expression[1:]
|
||||
case '.':
|
||||
result.first = "."
|
||||
result.sep = "."
|
||||
expression = expression[1:]
|
||||
case '/':
|
||||
result.first = "/"
|
||||
result.sep = "/"
|
||||
expression = expression[1:]
|
||||
case ';':
|
||||
result.first = ";"
|
||||
result.sep = ";"
|
||||
result.named = true
|
||||
expression = expression[1:]
|
||||
case '?':
|
||||
result.first = "?"
|
||||
result.sep = "&"
|
||||
result.named = true
|
||||
result.ifemp = "="
|
||||
expression = expression[1:]
|
||||
case '&':
|
||||
result.first = "&"
|
||||
result.sep = "&"
|
||||
result.named = true
|
||||
result.ifemp = "="
|
||||
expression = expression[1:]
|
||||
case '#':
|
||||
result.first = "#"
|
||||
result.sep = ","
|
||||
result.allowReserved = true
|
||||
expression = expression[1:]
|
||||
default:
|
||||
result.sep = ","
|
||||
}
|
||||
rawterms := strings.Split(expression, ",")
|
||||
result.terms = make([]templateTerm, len(rawterms))
|
||||
for i, raw := range rawterms {
|
||||
result.terms[i], err = parseTerm(raw)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
func parseTerm(term string) (result templateTerm, err error) {
|
||||
if strings.HasSuffix(term, "*") {
|
||||
result.explode = true
|
||||
term = term[:len(term)-1]
|
||||
}
|
||||
split := strings.Split(term, ":")
|
||||
if len(split) == 1 {
|
||||
result.name = term
|
||||
} else if len(split) == 2 {
|
||||
result.name = split[0]
|
||||
var parsed int64
|
||||
parsed, err = strconv.ParseInt(split[1], 10, 0)
|
||||
result.truncate = int(parsed)
|
||||
} else {
|
||||
err = errors.New("multiple colons in same term")
|
||||
}
|
||||
if !validname.MatchString(result.name) {
|
||||
err = errors.New("not a valid name: " + result.name)
|
||||
}
|
||||
if result.explode && result.truncate > 0 {
|
||||
err = errors.New("both explode and prefix modifers on same term")
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Expand expands a URI template with a set of values to produce a string.
|
||||
func (self *UriTemplate) Expand(value interface{}) (string, error) {
|
||||
values, ismap := value.(map[string]interface{})
|
||||
if !ismap {
|
||||
if m, ismap := struct2map(value); !ismap {
|
||||
return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
|
||||
} else {
|
||||
return self.Expand(m)
|
||||
}
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for _, p := range self.parts {
|
||||
err := p.expand(&buf, values)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
|
||||
if len(self.raw) > 0 {
|
||||
buf.WriteString(self.raw)
|
||||
return nil
|
||||
}
|
||||
var zeroLen = buf.Len()
|
||||
buf.WriteString(self.first)
|
||||
var firstLen = buf.Len()
|
||||
for _, term := range self.terms {
|
||||
value, exists := values[term.name]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
if buf.Len() != firstLen {
|
||||
buf.WriteString(self.sep)
|
||||
}
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
self.expandString(buf, term, v)
|
||||
case []interface{}:
|
||||
self.expandArray(buf, term, v)
|
||||
case map[string]interface{}:
|
||||
if term.truncate > 0 {
|
||||
return errors.New("cannot truncate a map expansion")
|
||||
}
|
||||
self.expandMap(buf, term, v)
|
||||
default:
|
||||
if m, ismap := struct2map(value); ismap {
|
||||
if term.truncate > 0 {
|
||||
return errors.New("cannot truncate a map expansion")
|
||||
}
|
||||
self.expandMap(buf, term, m)
|
||||
} else {
|
||||
str := fmt.Sprintf("%v", value)
|
||||
self.expandString(buf, term, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
if buf.Len() == firstLen {
|
||||
original := buf.Bytes()[:zeroLen]
|
||||
buf.Reset()
|
||||
buf.Write(original)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
|
||||
if self.named {
|
||||
buf.WriteString(name)
|
||||
if empty {
|
||||
buf.WriteString(self.ifemp)
|
||||
} else {
|
||||
buf.WriteString("=")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
|
||||
if len(s) > t.truncate && t.truncate > 0 {
|
||||
s = s[:t.truncate]
|
||||
}
|
||||
self.expandName(buf, t.name, len(s) == 0)
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
}
|
||||
|
||||
func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
|
||||
if len(a) == 0 {
|
||||
return
|
||||
} else if !t.explode {
|
||||
self.expandName(buf, t.name, false)
|
||||
}
|
||||
for i, value := range a {
|
||||
if t.explode && i > 0 {
|
||||
buf.WriteString(self.sep)
|
||||
} else if i > 0 {
|
||||
buf.WriteString(",")
|
||||
}
|
||||
var s string
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
s = v
|
||||
default:
|
||||
s = fmt.Sprintf("%v", v)
|
||||
}
|
||||
if len(s) > t.truncate && t.truncate > 0 {
|
||||
s = s[:t.truncate]
|
||||
}
|
||||
if self.named && t.explode {
|
||||
self.expandName(buf, t.name, len(s) == 0)
|
||||
}
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
}
|
||||
}
|
||||
|
||||
func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
|
||||
if len(m) == 0 {
|
||||
return
|
||||
}
|
||||
if !t.explode {
|
||||
self.expandName(buf, t.name, len(m) == 0)
|
||||
}
|
||||
var firstLen = buf.Len()
|
||||
for k, value := range m {
|
||||
if firstLen != buf.Len() {
|
||||
if t.explode {
|
||||
buf.WriteString(self.sep)
|
||||
} else {
|
||||
buf.WriteString(",")
|
||||
}
|
||||
}
|
||||
var s string
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
s = v
|
||||
default:
|
||||
s = fmt.Sprintf("%v", v)
|
||||
}
|
||||
if t.explode {
|
||||
buf.WriteString(escape(k, self.allowReserved))
|
||||
buf.WriteRune('=')
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
} else {
|
||||
buf.WriteString(escape(k, self.allowReserved))
|
||||
buf.WriteRune(',')
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func struct2map(v interface{}) (map[string]interface{}, bool) {
|
||||
value := reflect.ValueOf(v)
|
||||
switch value.Type().Kind() {
|
||||
case reflect.Ptr:
|
||||
return struct2map(value.Elem().Interface())
|
||||
case reflect.Struct:
|
||||
m := make(map[string]interface{})
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
tag := value.Type().Field(i).Tag
|
||||
var name string
|
||||
if strings.Contains(string(tag), ":") {
|
||||
name = tag.Get("uri")
|
||||
} else {
|
||||
name = strings.TrimSpace(string(tag))
|
||||
}
|
||||
if len(name) == 0 {
|
||||
name = value.Type().Field(i).Name
|
||||
}
|
||||
m[name] = value.Field(i).Interface()
|
||||
}
|
||||
return m, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
13 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go vendored Normal file
@@ -0,0 +1,13 @@
|
|||
package uritemplates
|
||||
|
||||
func Expand(path string, expansions map[string]string) (string, error) {
|
||||
template, err := Parse(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
values := make(map[string]interface{})
|
||||
for k, v := range expansions {
|
||||
values[k] = v
|
||||
}
|
||||
return template.Expand(values)
|
||||
}
|
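For context, a minimal sketch of how code inside google.golang.org/api (for example, a generated client such as the logging one in this PR) calls this helper. The package is internal, so it is not importable from user code; the template string and values below are illustrative, not taken from the diff.

```go
// Sketch only: Expand substitutes RFC 6570-style variables and URL-escapes
// each value before substitution.
package googleapi

import (
	"fmt"

	"google.golang.org/api/googleapi/internal/uritemplates"
)

func exampleExpand() {
	u, err := uritemplates.Expand("projects/{project}/logs/{logName}",
		map[string]string{
			"project": "my-project",
			"logName": "my log", // escaped to "my%20log" during expansion
		})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(u) // projects/my-project/logs/my%20log
}
```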
182 vendor/src/google.golang.org/api/googleapi/types.go vendored Normal file
@@ -0,0 +1,182 @@
// Copyright 2013 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package googleapi

import (
	"encoding/json"
	"strconv"
)

// Int64s is a slice of int64s that marshal as quoted strings in JSON.
type Int64s []int64

func (q *Int64s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		v, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return err
		}
		*q = append(*q, int64(v))
	}
	return nil
}

// Int32s is a slice of int32s that marshal as quoted strings in JSON.
type Int32s []int32

func (q *Int32s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		v, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return err
		}
		*q = append(*q, int32(v))
	}
	return nil
}

// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
type Uint64s []uint64

func (q *Uint64s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		v, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return err
		}
		*q = append(*q, uint64(v))
	}
	return nil
}

// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
type Uint32s []uint32

func (q *Uint32s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		v, err := strconv.ParseUint(s, 10, 32)
		if err != nil {
			return err
		}
		*q = append(*q, uint32(v))
	}
	return nil
}

// Float64s is a slice of float64s that marshal as quoted strings in JSON.
type Float64s []float64

func (q *Float64s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		v, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return err
		}
		*q = append(*q, float64(v))
	}
	return nil
}

func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
	dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
	dst = append(dst, '[')
	for i := 0; i < n; i++ {
		if i > 0 {
			dst = append(dst, ',')
		}
		dst = append(dst, '"')
		dst = fn(dst, i)
		dst = append(dst, '"')
	}
	dst = append(dst, ']')
	return dst, nil
}

func (s Int64s) MarshalJSON() ([]byte, error) {
	return quotedList(len(s), func(dst []byte, i int) []byte {
		return strconv.AppendInt(dst, s[i], 10)
	})
}

func (s Int32s) MarshalJSON() ([]byte, error) {
	return quotedList(len(s), func(dst []byte, i int) []byte {
		return strconv.AppendInt(dst, int64(s[i]), 10)
	})
}

func (s Uint64s) MarshalJSON() ([]byte, error) {
	return quotedList(len(s), func(dst []byte, i int) []byte {
		return strconv.AppendUint(dst, s[i], 10)
	})
}

func (s Uint32s) MarshalJSON() ([]byte, error) {
	return quotedList(len(s), func(dst []byte, i int) []byte {
		return strconv.AppendUint(dst, uint64(s[i]), 10)
	})
}

func (s Float64s) MarshalJSON() ([]byte, error) {
	return quotedList(len(s), func(dst []byte, i int) []byte {
		return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
	})
}

/*
 * Helper routines for simplifying the creation of optional fields of basic type.
 */

// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool { return &v }

// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 { return &v }

// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 { return &v }

// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 { return &v }

// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }

// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }

// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string { return &v }
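A brief hedged sketch of why these wrapper types exist: several Google APIs transmit 64-bit integers as quoted strings in JSON, so a field typed as googleapi.Int64s round-trips them correctly, and the pointer helpers distinguish "unset" from a zero value. The struct and field names below are illustrative, not part of the vendored package.

```go
// Sketch only: Counts uses googleapi.Int64s, so `["3","5"]` in JSON becomes
// []int64{3, 5} in Go and marshals back to quoted strings.
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

type stats struct {
	Counts  googleapi.Int64s `json:"counts"`
	Enabled *bool            `json:"enabled,omitempty"` // googleapi.Bool(true) marks it explicitly set
}

func main() {
	var s stats
	_ = json.Unmarshal([]byte(`{"counts":["3","5"]}`), &s)
	fmt.Println(s.Counts) // [3 5]

	s.Enabled = googleapi.Bool(true)
	out, _ := json.Marshal(s)
	fmt.Println(string(out)) // {"counts":["3","5"],"enabled":true}
}
```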
1692 vendor/src/google.golang.org/api/logging/v1beta3/logging-api.json vendored Normal file
File diff suppressed because it is too large
4787 vendor/src/google.golang.org/api/logging/v1beta3/logging-gen.go vendored Normal file
File diff suppressed because it is too large
11 vendor/src/google.golang.org/cloud/.travis.yml vendored Normal file
@@ -0,0 +1,11 @@
sudo: false
language: go
go:
- 1.4
- 1.5
install:
- go get -v google.golang.org/cloud/...
script:
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
  go test -v -tags=integration google.golang.org/cloud/...
12 vendor/src/google.golang.org/cloud/AUTHORS vendored Normal file
@@ -0,0 +1,12 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.

Google Inc.
Palm Stone Games, Inc.
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>
114 vendor/src/google.golang.org/cloud/CONTRIBUTING.md vendored Normal file
@@ -0,0 +1,114 @@
# Contributing

1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. Get the cloud package by running `go get -d google.golang.org/cloud`.
1. If you have already checked out the source, make sure that the remote git
   origin is https://code.googlesource.com/gocloud:

        git remote set-url origin https://code.googlesource.com/gocloud

1. Make changes and create a change by running `git codereview change <name>`,
   provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending the change and mail it as you receive feedback.

## Integration Tests

In addition to the unit tests, you may run the integration test suite.

To run the integration tests, you need to create and configure a project in the
Google Developers Console. Once you have created a project, set the following
environment variables to be able to run the tests against the actual APIs.

- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.

Create a storage bucket with the same name as the project ID set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.
The storage integration test will create and delete some objects in this bucket.

Install the [gcloud command-line tool][gcloudcli] on your machine and use it
to create the indexes used in the datastore integration tests; they are
defined in `datastore/testdata/index.yaml`.

From the project's root directory:

``` sh
# Install the app component
$ gcloud components update app

# Set the default project in your env
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID

# Authenticate the gcloud tool with your account
$ gcloud auth login

# Create the indexes
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
```

You can run the integration tests by running:

``` sh
$ go test -v -tags=integration google.golang.org/cloud/...
```

## Contributor License Agreements

Before we can accept your pull requests, you'll need to sign a Contributor
License Agreement (CLA):

- **If you are an individual writing original source code** and **you own the
  intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your work**,
  then you'll need to sign a [corporate CLA][corpcla].

You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests.

## Contributor Code of Conduct

As contributors and maintainers of this project,
and in the interest of fostering an open and welcoming community,
we pledge to respect all people who contribute through reporting issues,
posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project
a harassment-free experience for everyone,
regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic
  addresses, without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct.
By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently
applying these principles to every aspect of managing this project.
Project maintainers who do not follow or enforce the Code of Conduct
may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)

[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
[indvcla]: https://developers.google.com/open-source/cla/individual
[corpcla]: https://developers.google.com/open-source/cla/corporate
24 vendor/src/google.golang.org/cloud/CONTRIBUTORS vendored Normal file
@@ -0,0 +1,24 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>

# Keep the list alphabetically sorted.

Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Symonds <dsymonds@golang.org>
Glenn Lewis <gmlewis@google.com>
Johan Euphrosine <proppy@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Michael McGreevy <mcgreevy@golang.org>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>
202 vendor/src/google.golang.org/cloud/LICENSE vendored Normal file
@@ -0,0 +1,202 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2014 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
135 vendor/src/google.golang.org/cloud/README.md vendored Normal file
@@ -0,0 +1,135 @@
# Google Cloud for Go

[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)

**NOTE:** These packages are experimental, and may occasionally make
backwards-incompatible changes.

**NOTE:** This GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).

Go packages for Google Cloud Platform services. Supported APIs include:

 * Google Cloud Datastore
 * Google Cloud Storage
 * Google Cloud Pub/Sub
 * Google Cloud Container Engine

``` go
import "google.golang.org/cloud"
```

Documentation and examples are available at
[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud).

## Authorization

Authorization throughout the package is delegated to the
[golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) package.
Refer to its godoc documentation for examples of using oauth2 with this package.
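As a rough sketch of what that looks like in practice, one way (among several) to obtain an authorized client and build the `ctx` used by the examples below; the project ID is a placeholder and `google.DefaultClient` comes from golang.org/x/oauth2/google:

```go
// Sketch only: constructs an authorized context via Application Default
// Credentials (a service account on GCE, or the file named by
// GOOGLE_APPLICATION_CREDENTIALS elsewhere).
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	"google.golang.org/cloud"
)

func newCloudContext() context.Context {
	client, err := google.DefaultClient(context.Background(),
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	// "my-project-id" is a placeholder; use your Developers Console project ID.
	return cloud.NewContext("my-project-id", client)
}
```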
## Google Cloud Datastore

[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully
managed, schemaless database for storing non-relational data. Cloud Datastore
automatically scales with your users and supports ACID transactions, high availability
of reads and writes, strong consistency for reads and ancestor queries, and eventual
consistency for all other queries.

Follow the [activation instructions][cloud-datastore-activation] to use the Google
Cloud Datastore API with your project.

[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore)

```go
type Post struct {
	Title       string
	Body        string `datastore:",noindex"`
	PublishedAt time.Time
}
keys := []*datastore.Key{
	datastore.NewKey(ctx, "Post", "post1", 0, nil),
	datastore.NewKey(ctx, "Post", "post2", 0, nil),
}
posts := []*Post{
	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := datastore.PutMulti(ctx, keys, posts); err != nil {
	log.Println(err)
}
```

## Google Cloud Storage

[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
data on Google infrastructure with very high reliability, performance and availability,
and can be used to distribute large data objects to users via direct download.

[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage)

```go
// Read the object1 from bucket.
rc, err := storage.NewReader(ctx, "bucket", "object1")
if err != nil {
	log.Fatal(err)
}
slurp, err := ioutil.ReadAll(rc)
rc.Close()
if err != nil {
	log.Fatal(err)
}
```

## Google Cloud Pub/Sub (Alpha)

> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in
> backward-incompatible ways and is not recommended for production use. It is not
> subject to any SLA or deprecation policy.

[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
your services with reliable, many-to-many, asynchronous messaging hosted on Google's
infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
for building your own robust, global services.

[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub)

```go
// Publish "hello world" on topic1.
msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
	Data: []byte("hello world"),
})
if err != nil {
	log.Println(err)
}
// Pull messages via subscription1.
msgs, err := pubsub.Pull(ctx, "subscription1", 1)
if err != nil {
	log.Println(err)
}
```

## Contributing

Contributions are welcome. Please see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews; please don't open pull
requests against this repo, as new pull requests will be closed automatically.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.

[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
49 vendor/src/google.golang.org/cloud/cloud.go vendored Normal file
@@ -0,0 +1,49 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cloud contains Google Cloud Platform APIs related types
// and common functions.
package cloud // import "google.golang.org/cloud"

import (
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/cloud/internal"
)

// NewContext returns a new context that uses the provided http.Client.
// The provided http.Client is responsible for authorizing and authenticating
// the requests made to the Google Cloud APIs.
// It mutates the client's original Transport to append the cloud
// package's user-agent to the outgoing requests.
// You can obtain the project ID from the Google Developers Console,
// https://console.developers.google.com.
func NewContext(projID string, c *http.Client) context.Context {
	if c == nil {
		panic("invalid nil *http.Client passed to NewContext")
	}
	return WithContext(context.Background(), projID, c)
}

// WithContext returns a new context in a similar way NewContext does,
// but initiates the new context with the specified parent.
func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
	// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
	// Do User-Agent some other way.
	if _, ok := c.Transport.(*internal.Transport); !ok {
		c.Transport = &internal.Transport{Base: c.Transport}
	}
	return internal.WithContext(parent, projID, c)
}
327 vendor/src/google.golang.org/cloud/compute/metadata/metadata.go vendored Normal file
@@ -0,0 +1,327 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package metadata provides access to Google Compute Engine (GCE)
|
||||
// metadata and API service accounts.
|
||||
//
|
||||
// This package is a wrapper around the GCE metadata service,
|
||||
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||
package metadata // import "google.golang.org/cloud/compute/metadata"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/cloud/internal"
|
||||
)
|
||||
|
||||
type cachedValue struct {
|
||||
k string
|
||||
trim bool
|
||||
mu sync.Mutex
|
||||
v string
|
||||
}
|
||||
|
||||
var (
|
||||
projID = &cachedValue{k: "project/project-id", trim: true}
|
||||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
||||
instID = &cachedValue{k: "instance/id", trim: true}
|
||||
)
|
||||
|
||||
var metaClient = &http.Client{
|
||||
Transport: &internal.Transport{
|
||||
Base: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 750 * time.Millisecond,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 750 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// NotDefinedError is returned when requested metadata is not defined.
|
||||
//
|
||||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
||||
//
|
||||
// This error is not returned if the value is defined to be the empty
|
||||
// string.
|
||||
type NotDefinedError string
|
||||
|
||||
func (suffix NotDefinedError) Error() string {
|
||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||
}
|
||||
|
||||
// Get returns a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
//
|
||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||
// 169.254.169.254 will be used instead.
|
||||
//
|
||||
// If the requested metadata is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
func Get(suffix string) (string, error) {
|
||||
val, _, err := getETag(suffix)
|
||||
return val, err
|
||||
}
|
||||
|
||||
// getETag returns a value from the metadata service as well as the associated
|
||||
// ETag. This func is otherwise equivalent to Get.
|
||||
func getETag(suffix string) (value, etag string, err error) {
|
||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||
// a container, which is an important use-case for local testing of cloud
|
||||
// deployments. To enable spoofing of the metadata service, the environment
|
||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||
// requests shall go.
|
||||
host := os.Getenv("GCE_METADATA_HOST")
|
||||
if host == "" {
|
||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||
// binaries built with the "netgo" tag and without cgo won't
|
||||
// know the search suffix for "metadata" is
|
||||
// ".google.internal", and this IP address is documented as
|
||||
// being stable anyway.
|
||||
host = "169.254.169.254"
|
||||
}
|
||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
res, err := metaClient.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return "", "", NotDefinedError(suffix)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
|
||||
}
|
||||
all, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string(all), res.Header.Get("Etag"), nil
|
||||
}
|
||||
|
||||
func getTrimmed(suffix string) (s string, err error) {
|
||||
s, err = Get(suffix)
|
||||
s = strings.TrimSpace(s)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *cachedValue) get() (v string, err error) {
|
||||
defer c.mu.Unlock()
|
||||
c.mu.Lock()
|
||||
if c.v != "" {
|
||||
return c.v, nil
|
||||
}
|
||||
if c.trim {
|
||||
v, err = getTrimmed(c.k)
|
||||
} else {
|
||||
v, err = Get(c.k)
|
||||
}
|
||||
if err == nil {
|
||||
c.v = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var onGCE struct {
|
||||
sync.Mutex
|
||||
set bool
|
||||
v bool
|
||||
}
|
||||
|
||||
// OnGCE reports whether this process is running on Google Compute Engine.
|
||||
func OnGCE() bool {
|
||||
defer onGCE.Unlock()
|
||||
onGCE.Lock()
|
||||
if onGCE.set {
|
||||
return onGCE.v
|
||||
}
|
||||
onGCE.set = true
|
||||
|
||||
// We use the DNS name of the metadata service here instead of the IP address
|
||||
// because we expect that to fail faster in the not-on-GCE case.
|
||||
res, err := metaClient.Get("http://metadata.google.internal")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
|
||||
return onGCE.v
|
||||
}
|
||||
|
||||
// Subscribe subscribes to a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
//
|
||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
||||
// is deleted. Subscribe returns the error value returned from the last call to
|
||||
// fn, which may be nil when ok == false.
|
||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
const failedSubscribeSleep = time.Second * 5
|
||||
|
||||
// First check to see if the metadata value exists at all.
|
||||
val, lastETag, err := getETag(suffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fn(val, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ok := true
|
||||
suffix += "?wait_for_change=true&last_etag="
|
||||
for {
|
||||
val, etag, err := getETag(suffix + url.QueryEscape(lastETag))
|
||||
if err != nil {
|
||||
if _, deleted := err.(NotDefinedError); !deleted {
|
||||
time.Sleep(failedSubscribeSleep)
|
||||
continue // Retry on other errors.
|
||||
}
|
||||
ok = false
|
||||
}
|
||||
lastETag = etag
|
||||
|
||||
if err := fn(val, ok); err != nil || !ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func ProjectID() (string, error) { return projID.get() }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func NumericProjectID() (string, error) { return projNum.get() }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func InternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/ip")
|
||||
}
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func ExternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||
}
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func Hostname() (string, error) {
|
||||
return getTrimmed("instance/hostname")
|
||||
}
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func InstanceTags() ([]string, error) {
|
||||
var s []string
|
||||
j, err := Get("instance/tags")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func InstanceID() (string, error) {
|
||||
return instID.get()
|
||||
}
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func InstanceName() (string, error) {
|
||||
host, err := Hostname()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Split(host, ".")[0], nil
|
||||
}
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func Zone() (string, error) {
|
||||
zone, err := getTrimmed("instance/zone")
|
||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||
}
|
||||
|
||||
// InstanceAttributes returns the list of user-defined attributes,
|
||||
// assigned when initially creating a GCE VM instance. The value of an
|
||||
// attribute can be obtained with InstanceAttributeValue.
|
||||
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
|
||||
|
||||
// ProjectAttributes returns the list of user-defined attributes
|
||||
// applying to the project as a whole, not just this VM. The value of
|
||||
// an attribute can be obtained with ProjectAttributeValue.
|
||||
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
|
||||
|
||||
func lines(suffix string) ([]string, error) {
|
||||
j, err := Get(suffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||
for i := range s {
|
||||
s[i] = strings.TrimSpace(s[i])
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceAttributeValue returns the value of the provided VM
|
||||
// instance attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func InstanceAttributeValue(attr string) (string, error) {
|
||||
return Get("instance/attributes/" + attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue returns the value of the provided
|
||||
// project attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func ProjectAttributeValue(attr string) (string, error) {
|
||||
return Get("project/attributes/" + attr)
|
||||
}
|
||||
|
||||
// Scopes returns the service account scopes for the given account.
|
||||
// The account may be empty or the string "default" to use the instance's
|
||||
// main account.
|
||||
func Scopes(serviceAccount string) ([]string, error) {
|
||||
if serviceAccount == "" {
|
||||
serviceAccount = "default"
|
||||
}
|
||||
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||
}
|
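For orientation, a hedged sketch of how a caller (for example, a logging driver that needs to know where it is running) might use this metadata package. The fallback project ID is a placeholder; only functions defined in the file above are used.

```go
// Sketch only: discovers the GCP project and zone when running on GCE,
// falling back to a caller-supplied project ID elsewhere.
package main

import (
	"fmt"

	"google.golang.org/cloud/compute/metadata"
)

func discoverProject(fallback string) string {
	if !metadata.OnGCE() {
		return fallback // not on GCE; the metadata server is unreachable
	}
	project, err := metadata.ProjectID()
	if err != nil {
		return fallback
	}
	if zone, err := metadata.Zone(); err == nil {
		fmt.Println("running in zone", zone)
	}
	return project
}

func main() {
	fmt.Println("project:", discoverProject("my-fallback-project"))
}
```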
128 vendor/src/google.golang.org/cloud/internal/cloud.go vendored Normal file
@@ -0,0 +1,128 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package internal provides support for the cloud packages.
|
||||
//
|
||||
// Users should not import this package directly.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type contextKey struct{}
|
||||
|
||||
func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
|
||||
if c == nil {
|
||||
panic("nil *http.Client passed to WithContext")
|
||||
}
|
||||
if projID == "" {
|
||||
panic("empty project ID passed to WithContext")
|
||||
}
|
||||
return context.WithValue(parent, contextKey{}, &cloudContext{
|
||||
ProjectID: projID,
|
||||
HTTPClient: c,
|
||||
})
|
||||
}
|
||||
|
||||
const userAgent = "gcloud-golang/0.1"
|
||||
|
||||
type cloudContext struct {
|
||||
ProjectID string
|
||||
HTTPClient *http.Client
|
||||
|
||||
mu sync.Mutex // guards svc
|
||||
svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
|
||||
}
|
||||
|
||||
// Service returns the result of the fill function if it's never been
|
||||
// called before for the given name (which is assumed to be an API
|
||||
// service name, like "datastore"). If it has already been cached, the fill
|
||||
// func is not run.
|
||||
// It's safe for concurrent use by multiple goroutines.
|
||||
func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
|
||||
return cc(ctx).service(name, fill)
|
||||
}
|
||||
|
||||
func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.svc == nil {
|
||||
c.svc = make(map[string]interface{})
|
||||
} else if v, ok := c.svc[name]; ok {
|
||||
return v
|
||||
}
|
||||
v := fill(c.HTTPClient)
|
||||
c.svc[name] = v
|
||||
return v
|
||||
}
|
||||
|
||||
// Transport is an http.RoundTripper that appends
|
||||
// Google Cloud client's user-agent to the original
|
||||
// request's user-agent header.
|
||||
type Transport struct {
|
||||
// Base represents the actual http.RoundTripper
|
||||
// the requests will be delegated to.
|
||||
Base http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip appends a user-agent to the existing user-agent
|
||||
// header and delegates the request to the base http.RoundTripper.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req = cloneRequest(req)
|
||||
ua := req.Header.Get("User-Agent")
|
||||
if ua == "" {
|
||||
ua = userAgent
|
||||
} else {
|
||||
ua = fmt.Sprintf("%s %s", ua, userAgent)
|
||||
}
|
||||
req.Header.Set("User-Agent", ua)
|
||||
return t.Base.RoundTrip(req)
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
return r2
|
||||
}
|
||||
|
||||
func ProjID(ctx context.Context) string {
|
||||
return cc(ctx).ProjectID
|
||||
}
|
||||
|
||||
func HTTPClient(ctx context.Context) *http.Client {
|
||||
return cc(ctx).HTTPClient
|
||||
}
|
||||
|
||||
// cc returns the internal *cloudContext (cc) state for a context.Context.
|
||||
// It panics if the user did it wrong.
|
||||
func cc(ctx context.Context) *cloudContext {
|
||||
if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
|
||||
return c
|
||||
}
|
||||
panic("invalid context.Context type; it should be created with cloud.NewContext")
|
||||
}
|
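The Transport type above is a small instance of a common pattern: wrapping an http.RoundTripper so every outgoing request is stamped with extra headers. A self-contained sketch of the same idea follows; the type and variable names are illustrative, and only the "gcloud-golang/0.1" user-agent string is taken from the file above.

```go
// Sketch only: a RoundTripper that appends a user-agent, in the spirit of
// the internal.Transport defined above.
package main

import "net/http"

type uaTransport struct {
	base http.RoundTripper
	ua   string
}

func (t *uaTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone before mutating: a RoundTripper must not modify the caller's request.
	r2 := new(http.Request)
	*r2 = *req
	r2.Header = make(http.Header, len(req.Header))
	for k, v := range req.Header {
		r2.Header[k] = v
	}
	if prev := r2.Header.Get("User-Agent"); prev != "" {
		r2.Header.Set("User-Agent", prev+" "+t.ua)
	} else {
		r2.Header.Set("User-Agent", t.ua)
	}
	return t.base.RoundTrip(r2)
}

func main() {
	client := &http.Client{Transport: &uaTransport{base: http.DefaultTransport, ua: "gcloud-golang/0.1"}}
	_ = client // every request sent through this client now carries the extra user-agent
}
```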
24 vendor/src/google.golang.org/cloud/internal/opts/option.go vendored Normal file
@@ -0,0 +1,24 @@
// Package opts holds the DialOpts struct, configurable by
// cloud.ClientOptions to set up transports for cloud packages.
//
// This is a separate package to prevent cycles between the core
// cloud packages.
package opts

import (
	"net/http"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
)

type DialOpt struct {
	Endpoint  string
	Scopes    []string
	UserAgent string

	TokenSource oauth2.TokenSource

	HTTPClient *http.Client
	GRPCClient *grpc.ClientConn
}
29 vendor/src/google.golang.org/cloud/internal/transport/cancelreq.go vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.5

package transport

import "net/http"

// makeReqCancel returns a closure that cancels the given http.Request
// when called.
func makeReqCancel(req *http.Request) func(http.RoundTripper) {
	c := make(chan struct{})
	req.Cancel = c
	return func(http.RoundTripper) {
		close(c)
	}
}
31 vendor/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go vendored Normal file
@@ -0,0 +1,31 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.5

package transport

import "net/http"

// makeReqCancel returns a closure that cancels the given http.Request
// when called.
func makeReqCancel(req *http.Request) func(http.RoundTripper) {
	// Go 1.4 and prior do not have a reliable way of cancelling a request.
	// Transport.CancelRequest will only work if the request is already in-flight.
	return func(r http.RoundTripper) {
		if t, ok := r.(*http.Transport); ok {
			t.CancelRequest(req)
		}
	}
}
134 vendor/src/google.golang.org/cloud/internal/transport/dial.go vendored Normal file
@@ -0,0 +1,134 @@
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/cloud"
|
||||
"google.golang.org/cloud/internal/opts"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/oauth"
|
||||
)
|
||||
|
||||
// ErrHTTP is returned on a non-200 HTTP response.
|
||||
type ErrHTTP struct {
|
||||
StatusCode int
|
||||
Body []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *ErrHTTP) Error() string {
|
||||
if e.err == nil {
|
||||
return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body)
|
||||
}
|
||||
return e.err.Error()
|
||||
}
|
||||
|
||||
// NewHTTPClient returns an HTTP client for communicating with a Google Cloud
|
||||
// service, configured with the given ClientOptions. It also returns the endpoint
|
||||
// for the service as specified in the options.
|
||||
func NewHTTPClient(ctx context.Context, opt ...cloud.ClientOption) (*http.Client, string, error) {
|
||||
var o opts.DialOpt
|
||||
for _, opt := range opt {
|
||||
opt.Resolve(&o)
|
||||
}
|
||||
if o.GRPCClient != nil {
|
||||
return nil, "", errors.New("unsupported GRPC base transport specified")
|
||||
}
|
||||
// TODO(djd): Wrap all http.Clients with appropriate internal version to add
|
||||
// UserAgent header and prepend correct endpoint.
|
||||
if o.HTTPClient != nil {
|
||||
return o.HTTPClient, o.Endpoint, nil
|
||||
}
|
||||
if o.TokenSource == nil {
|
||||
var err error
|
||||
o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
|
||||
}
|
||||
}
|
||||
return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil
|
||||
}
|
||||
|
||||
// NewProtoClient returns a ProtoClient for communicating with a Google cloud service,
|
||||
// configured with the given ClientOptions.
|
||||
func NewProtoClient(ctx context.Context, opt ...cloud.ClientOption) (*ProtoClient, error) {
|
||||
var o opts.DialOpt
|
||||
for _, opt := range opt {
|
||||
opt.Resolve(&o)
|
||||
}
|
||||
if o.GRPCClient != nil {
|
||||
return nil, errors.New("unsupported GRPC base transport specified")
|
||||
}
|
||||
var client *http.Client
|
||||
switch {
|
||||
case o.HTTPClient != nil:
|
||||
if o.TokenSource != nil {
|
||||
return nil, errors.New("at most one of WithTokenSource or WithBaseHTTP may be provided")
|
||||
}
|
||||
client = o.HTTPClient
|
||||
case o.TokenSource != nil:
|
||||
client = oauth2.NewClient(ctx, o.TokenSource)
|
||||
default:
|
||||
var err error
|
||||
client, err = google.DefaultClient(ctx, o.Scopes...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &ProtoClient{
|
||||
client: client,
|
||||
endpoint: o.Endpoint,
|
||||
userAgent: o.UserAgent,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DialGRPC returns a GRPC connection for use communicating with a Google cloud
|
||||
// service, configured with the given ClientOptions.
|
||||
func DialGRPC(ctx context.Context, opt ...cloud.ClientOption) (*grpc.ClientConn, error) {
|
||||
var o opts.DialOpt
|
||||
for _, opt := range opt {
|
||||
opt.Resolve(&o)
|
||||
}
|
||||
if o.HTTPClient != nil {
|
||||
return nil, errors.New("unsupported HTTP base transport specified")
|
||||
}
|
||||
if o.GRPCClient != nil {
|
||||
return o.GRPCClient, nil
|
||||
}
|
||||
if o.TokenSource == nil {
|
||||
var err error
|
||||
o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google.DefaultTokenSource: %v", err)
|
||||
}
|
||||
}
|
||||
grpcOpts := []grpc.DialOption{
|
||||
grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}),
|
||||
grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
|
||||
}
|
||||
if o.UserAgent != "" {
|
||||
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
|
||||
}
|
||||
return grpc.Dial(o.Endpoint, grpcOpts...)
|
||||
}
|
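For orientation, a hedged sketch of how this internal transport package is typically used: because it lives under internal/, only packages inside google.golang.org/cloud (such as the logging package vendored later in this diff) can import it. The package name, endpoint, and scope below are illustrative.

```go
package cloudexample // illustrative; must live under google.golang.org/cloud to import internal/transport

import (
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/cloud"
	"google.golang.org/cloud/internal/transport"
)

// dialHTTP mirrors what a cloud client package does: resolve an authenticated
// *http.Client and the effective endpoint from a set of ClientOptions. With no
// WithTokenSource/WithBaseHTTP option, Application Default Credentials are used
// via google.DefaultTokenSource.
func dialHTTP(ctx context.Context) (*http.Client, string, error) {
	return transport.NewHTTPClient(ctx,
		cloud.WithEndpoint("https://logging.googleapis.com/"),
		cloud.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
	)
}
```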
80 vendor/src/google.golang.org/cloud/internal/transport/proto.go vendored Normal file
@@ -0,0 +1,80 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transport

import (
	"bytes"
	"io/ioutil"
	"net/http"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
)

type ProtoClient struct {
	client    *http.Client
	endpoint  string
	userAgent string
}

func (c *ProtoClient) Call(ctx context.Context, method string, req, resp proto.Message) error {
	payload, err := proto.Marshal(req)
	if err != nil {
		return err
	}

	httpReq, err := http.NewRequest("POST", c.endpoint+method, bytes.NewReader(payload))
	if err != nil {
		return err
	}
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	if ua := c.userAgent; ua != "" {
		httpReq.Header.Set("User-Agent", ua)
	}

	errc := make(chan error, 1)
	cancel := makeReqCancel(httpReq)

	go func() {
		r, err := c.client.Do(httpReq)
		if err != nil {
			errc <- err
			return
		}
		defer r.Body.Close()

		body, err := ioutil.ReadAll(r.Body)
		if r.StatusCode != http.StatusOK {
			err = &ErrHTTP{
				StatusCode: r.StatusCode,
				Body:       body,
				err:        err,
			}
		}
		if err != nil {
			errc <- err
			return
		}
		errc <- proto.Unmarshal(body, resp)
	}()

	select {
	case <-ctx.Done():
		cancel(c.client.Transport) // Cancel the HTTP request.
		return ctx.Err()
	case err := <-errc:
		return err
	}
}
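The closing select is what ties Call to its context: when the deadline expires, the request is cancelled through makeReqCancel. A hedged usage sketch follows; the pb package, its FooRequest/FooResponse message types, and the method path are hypothetical.

```go
package example // sketch only; the pb package and method path are hypothetical

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/cloud/internal/transport"

	pb "example.com/foo/pb" // hypothetical generated protobuf package
)

func getFoo(ctx context.Context, c *transport.ProtoClient, req *pb.FooRequest) (*pb.FooResponse, error) {
	// An expired deadline cancels the in-flight HTTP request.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	resp := new(pb.FooResponse)
	if err := c.Call(ctx, "v1/foo:get", req, resp); err != nil {
		// Non-200 responses surface as *transport.ErrHTTP with the body attached.
		if herr, ok := err.(*transport.ErrHTTP); ok {
			return nil, fmt.Errorf("HTTP %d: %s", herr.StatusCode, herr.Body)
		}
		return nil, err
	}
	return resp, nil
}
```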
BIN vendor/src/google.golang.org/cloud/key.json.enc vendored Normal file
Binary file not shown.
468 vendor/src/google.golang.org/cloud/logging/logging.go vendored Normal file
@@ -0,0 +1,468 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package logging contains a Google Cloud Logging client.
|
||||
//
|
||||
// This package is experimental and subject to API changes.
|
||||
package logging // import "google.golang.org/cloud/logging"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
api "google.golang.org/api/logging/v1beta3"
|
||||
"google.golang.org/cloud"
|
||||
"google.golang.org/cloud/internal/transport"
|
||||
)
|
||||
|
||||
// Scope is the OAuth2 scope necessary to use Google Cloud Logging.
|
||||
const Scope = api.LoggingWriteScope
|
||||
|
||||
// Level is the log level.
|
||||
type Level int
|
||||
|
||||
const (
|
||||
// Default means no assigned severity level.
|
||||
Default Level = iota
|
||||
Debug
|
||||
Info
|
||||
Warning
|
||||
Error
|
||||
Critical
|
||||
Alert
|
||||
Emergency
|
||||
nLevel
|
||||
)
|
||||
|
||||
var levelName = [nLevel]string{
|
||||
Default: "",
|
||||
Debug: "DEBUG",
|
||||
Info: "INFO",
|
||||
Warning: "WARNING",
|
||||
Error: "ERROR",
|
||||
Critical: "CRITICAL",
|
||||
Alert: "ALERT",
|
||||
Emergency: "EMERGENCY",
|
||||
}
|
||||
|
||||
func (v Level) String() string {
|
||||
return levelName[v]
|
||||
}
|
||||
|
||||
// Client is a Google Cloud Logging client.
|
||||
// It must be constructed via NewClient.
|
||||
type Client struct {
|
||||
svc *api.Service
|
||||
logs *api.ProjectsLogsEntriesService
|
||||
projID string
|
||||
logName string
|
||||
writer [nLevel]io.Writer
|
||||
logger [nLevel]*log.Logger
|
||||
|
||||
mu sync.Mutex
|
||||
queued []*api.LogEntry
|
||||
curFlush *flushCall // currently in-flight flush
|
||||
flushTimer *time.Timer // nil before first use
|
||||
timerActive bool // whether flushTimer is armed
|
||||
inFlight int // number of log entries sent to API service but not yet ACKed
|
||||
|
||||
// For testing:
|
||||
timeNow func() time.Time // optional
|
||||
|
||||
// ServiceName may be "appengine.googleapis.com",
|
||||
// "compute.googleapis.com" or "custom.googleapis.com".
|
||||
//
|
||||
// The default is "custom.googleapis.com".
|
||||
//
|
||||
// The service name is only used by the API server to
|
||||
// determine which of the labels are used to index the logs.
|
||||
ServiceName string
|
||||
|
||||
// CommonLabels are metadata labels that apply to all log
|
||||
// entries in this request, so that you don't have to repeat
|
||||
// them in each log entry's metadata.labels field. If any of
|
||||
// the log entries contains a (key, value) with the same key
|
||||
// that is in CommonLabels, then the entry's (key, value)
|
||||
// overrides the one in CommonLabels.
|
||||
CommonLabels map[string]string
|
||||
|
||||
// BufferLimit is the maximum number of items to keep in memory
|
||||
// before flushing. Zero means automatic. A value of 1 means to
|
||||
// flush after each log entry.
|
||||
// The default is currently 10,000.
|
||||
BufferLimit int
|
||||
|
||||
// FlushAfter optionally specifies a threshold count at which buffered
|
||||
// log entries are flushed, even if the BufferInterval has not yet
|
||||
// been reached.
|
||||
// The default is currently 10.
|
||||
FlushAfter int
|
||||
|
||||
// BufferInterval is the maximum amount of time that an item
|
||||
// should remain buffered in memory before being flushed to
|
||||
// the logging service.
|
||||
// The default is currently 1 second.
|
||||
BufferInterval time.Duration
|
||||
|
||||
// Overflow is a function which runs when the Log function
|
||||
// overflows its configured buffer limit. If nil, the log
|
||||
// entry is dropped. The return value from Overflow is
|
||||
// returned by Log.
|
||||
Overflow func(*Client, Entry) error
|
||||
}
|
||||
|
||||
func (c *Client) flushAfter() int {
|
||||
if v := c.FlushAfter; v > 0 {
|
||||
return v
|
||||
}
|
||||
return 10
|
||||
}
|
||||
|
||||
func (c *Client) bufferInterval() time.Duration {
|
||||
if v := c.BufferInterval; v > 0 {
|
||||
return v
|
||||
}
|
||||
return time.Second
|
||||
}
|
||||
|
||||
func (c *Client) bufferLimit() int {
|
||||
if v := c.BufferLimit; v > 0 {
|
||||
return v
|
||||
}
|
||||
return 10000
|
||||
}
|
||||
|
||||
func (c *Client) serviceName() string {
|
||||
if v := c.ServiceName; v != "" {
|
||||
return v
|
||||
}
|
||||
return "custom.googleapis.com"
|
||||
}
|
||||
|
||||
func (c *Client) now() time.Time {
|
||||
if now := c.timeNow; now != nil {
|
||||
return now()
|
||||
}
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Writer returns an io.Writer for the provided log level.
|
||||
//
|
||||
// Each Write call on the returned Writer generates a log entry.
|
||||
//
|
||||
// This Writer accessor does not allocate, so callers do not need to
|
||||
// cache.
|
||||
func (c *Client) Writer(v Level) io.Writer { return c.writer[v] }
|
||||
|
||||
// Logger returns a *log.Logger for the provided log level.
|
||||
//
|
||||
// A Logger for each Level is pre-allocated by NewClient with an empty
|
||||
// prefix and no flags. This Logger accessor does not allocate.
|
||||
// Callers wishing to use alternate flags (such as log.Lshortfile) may
|
||||
// mutate the returned Logger with SetFlags. Such mutations affect all
|
||||
// callers in the program.
|
||||
func (c *Client) Logger(v Level) *log.Logger { return c.logger[v] }
|
||||
|
||||
type levelWriter struct {
|
||||
level Level
|
||||
c *Client
|
||||
}
|
||||
|
||||
func (w levelWriter) Write(p []byte) (n int, err error) {
|
||||
return len(p), w.c.Log(Entry{
|
||||
Level: w.level,
|
||||
Payload: string(p),
|
||||
})
|
||||
}
|
||||
|
||||
// Entry is a log entry.
|
||||
type Entry struct {
|
||||
// Time is the time of the entry. If the zero value, the current time is used.
|
||||
Time time.Time
|
||||
|
||||
// Level is log entry's severity level.
|
||||
// The zero value means no assigned severity level.
|
||||
Level Level
|
||||
|
||||
// Payload must be either a string, []byte, or something that
|
||||
// marshals via the encoding/json package to a JSON object
|
||||
// (and not any other type of JSON value).
|
||||
Payload interface{}
|
||||
|
||||
// Labels optionally specifies key/value labels for the log entry.
|
||||
// Depending on the Client's ServiceName, these are indexed differently
|
||||
// by the Cloud Logging Service.
|
||||
// See https://cloud.google.com/logging/docs/logs_index
|
||||
// The Client.Log method takes ownership of this map.
|
||||
Labels map[string]string
|
||||
|
||||
// TODO: de-duping id
|
||||
}
|
||||
|
||||
func (c *Client) apiEntry(e Entry) (*api.LogEntry, error) {
|
||||
t := e.Time
|
||||
if t.IsZero() {
|
||||
t = c.now()
|
||||
}
|
||||
|
||||
ent := &api.LogEntry{
|
||||
Metadata: &api.LogEntryMetadata{
|
||||
Timestamp: t.UTC().Format(time.RFC3339Nano),
|
||||
ServiceName: c.serviceName(),
|
||||
Severity: e.Level.String(),
|
||||
Labels: e.Labels,
|
||||
},
|
||||
}
|
||||
switch p := e.Payload.(type) {
|
||||
case string:
|
||||
ent.TextPayload = p
|
||||
case []byte:
|
||||
ent.TextPayload = string(p)
|
||||
default:
|
||||
ent.StructPayload = api.LogEntryStructPayload(p)
|
||||
}
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
// LogSync logs e synchronously without any buffering.
|
||||
// This is mostly intended for debugging or critical errors.
|
||||
func (c *Client) LogSync(e Entry) error {
|
||||
ent, err := c.apiEntry(e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{
|
||||
CommonLabels: c.CommonLabels,
|
||||
Entries: []*api.LogEntry{ent},
|
||||
}).Do()
|
||||
return err
|
||||
}
|
||||
|
||||
var ErrOverflow = errors.New("logging: log entry overflowed buffer limits")
|
||||
|
||||
// Log queues an entry to be sent to the logging service, subject to the
|
||||
// Client's parameters. By default, the log will be flushed within
|
||||
// one second.
|
||||
// Log only returns an error if the entry is invalid or the queue is at
|
||||
// capacity. If the queue is at capacity and the entry can't be added,
|
||||
// Log returns either ErrOverflow when c.Overflow is nil, or the
|
||||
// value returned by c.Overflow.
|
||||
func (c *Client) Log(e Entry) error {
|
||||
ent, err := c.apiEntry(e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
buffered := len(c.queued) + c.inFlight
|
||||
|
||||
if buffered >= c.bufferLimit() {
|
||||
c.mu.Unlock()
|
||||
if fn := c.Overflow; fn != nil {
|
||||
return fn(c, e)
|
||||
}
|
||||
return ErrOverflow
|
||||
}
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.queued = append(c.queued, ent)
|
||||
if len(c.queued) >= c.flushAfter() {
|
||||
c.scheduleFlushLocked(0)
|
||||
return nil
|
||||
}
|
||||
c.scheduleFlushLocked(c.bufferInterval())
|
||||
return nil
|
||||
}
|
||||
|
||||
// c.mu must be held.
|
||||
//
|
||||
// d will be one of two values: either c.BufferInterval (or its
|
||||
// default value) or 0.
|
||||
func (c *Client) scheduleFlushLocked(d time.Duration) {
|
||||
if c.inFlight > 0 {
|
||||
// For now to keep things simple, only allow one HTTP
|
||||
// request in flight at a time.
|
||||
return
|
||||
}
|
||||
switch {
|
||||
case c.flushTimer == nil:
|
||||
// First flush.
|
||||
c.timerActive = true
|
||||
c.flushTimer = time.AfterFunc(d, c.timeoutFlush)
|
||||
case c.timerActive && d == 0:
|
||||
// Make it happen sooner. For example, this is the
|
||||
// case of transitioning from a 1 second flush after
|
||||
// the 1st item to an immediate flush after the 10th
|
||||
// item.
|
||||
c.flushTimer.Reset(0)
|
||||
case !c.timerActive:
|
||||
c.timerActive = true
|
||||
c.flushTimer.Reset(d)
|
||||
default:
|
||||
// else timer was already active, also at d > 0,
|
||||
// so we don't touch it and let it fire as previously
|
||||
// scheduled.
|
||||
}
|
||||
}
|
||||
|
||||
// timeoutFlush runs in its own goroutine (from time.AfterFunc) and
|
||||
// flushes c.queued.
|
||||
func (c *Client) timeoutFlush() {
|
||||
c.mu.Lock()
|
||||
c.timerActive = false
|
||||
c.mu.Unlock()
|
||||
if err := c.Flush(); err != nil {
|
||||
// schedule another try
|
||||
// TODO: smarter back-off?
|
||||
c.mu.Lock()
|
||||
c.scheduleFlushLocked(5 * time.Second)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Ping reports whether the client's connection to Google Cloud
|
||||
// Logging and the authentication configuration are valid.
|
||||
func (c *Client) Ping() error {
|
||||
_, err := c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{
|
||||
Entries: []*api.LogEntry{},
|
||||
}).Do()
|
||||
return err
|
||||
}
|
||||
|
||||
// Flush flushes any buffered log entries.
|
||||
func (c *Client) Flush() error {
|
||||
var numFlush int
|
||||
c.mu.Lock()
|
||||
for {
|
||||
// We're already flushing (or we just started flushing
|
||||
// ourselves), so wait for it to finish.
|
||||
if f := c.curFlush; f != nil {
|
||||
wasEmpty := len(c.queued) == 0
|
||||
c.mu.Unlock()
|
||||
<-f.donec // wait for it
|
||||
numFlush++
|
||||
// Terminate whenever there's an error, we've
|
||||
// already flushed twice (one that was already
|
||||
// in-flight when flush was called, and then
|
||||
// one we instigated), or the queue was empty
|
||||
// when we released the locked (meaning this
|
||||
// in-flight flush removes everything present
|
||||
// when Flush was called, and we don't need to
|
||||
// kick off a new flush for things arriving
|
||||
// afterward)
|
||||
if f.err != nil || numFlush == 2 || wasEmpty {
|
||||
return f.err
|
||||
}
|
||||
// Otherwise, re-obtain the lock and loop,
|
||||
// starting over with seeing if a flush is in
|
||||
// progress, which might've been started by a
|
||||
// different goroutine before aquiring this
|
||||
// lock again.
|
||||
c.mu.Lock()
|
||||
continue
|
||||
}
|
||||
|
||||
// Terminal case:
|
||||
if len(c.queued) == 0 {
|
||||
c.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
c.startFlushLocked()
|
||||
}
|
||||
}
|
||||
|
||||
// requires c.mu be held.
|
||||
func (c *Client) startFlushLocked() {
|
||||
if c.curFlush != nil {
|
||||
panic("internal error: flush already in flight")
|
||||
}
|
||||
if len(c.queued) == 0 {
|
||||
panic("internal error: no items queued")
|
||||
}
|
||||
logEntries := c.queued
|
||||
c.inFlight = len(logEntries)
|
||||
c.queued = nil
|
||||
|
||||
flush := &flushCall{
|
||||
donec: make(chan struct{}),
|
||||
}
|
||||
c.curFlush = flush
|
||||
go func() {
|
||||
defer close(flush.donec)
|
||||
_, err := c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{
|
||||
CommonLabels: c.CommonLabels,
|
||||
Entries: logEntries,
|
||||
}).Do()
|
||||
flush.err = err
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.inFlight = 0
|
||||
c.curFlush = nil
|
||||
if err != nil {
|
||||
c.queued = append(c.queued, logEntries...)
|
||||
} else if len(c.queued) > 0 {
|
||||
c.scheduleFlushLocked(c.bufferInterval())
|
||||
}
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
const prodAddr = "https://logging.googleapis.com/"
|
||||
|
||||
const userAgent = "gcloud-golang-logging/20150922"
|
||||
|
||||
// NewClient returns a new log client, logging to the named log in the
|
||||
// provided project.
|
||||
//
|
||||
// The exported fields on the returned client may be modified before
|
||||
// the client is used for logging. Once log entries are in flight,
|
||||
// the fields must not be modified.
|
||||
func NewClient(ctx context.Context, projectID, logName string, opts ...cloud.ClientOption) (*Client, error) {
|
||||
httpClient, endpoint, err := transport.NewHTTPClient(ctx, append([]cloud.ClientOption{
|
||||
cloud.WithEndpoint(prodAddr),
|
||||
cloud.WithScopes(api.CloudPlatformScope),
|
||||
cloud.WithUserAgent(userAgent),
|
||||
}, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
svc, err := api.New(httpClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
svc.BasePath = endpoint
|
||||
c := &Client{
|
||||
svc: svc,
|
||||
logs: api.NewProjectsLogsEntriesService(svc),
|
||||
logName: logName,
|
||||
projID: projectID,
|
||||
}
|
||||
for i := range c.writer {
|
||||
level := Level(i)
|
||||
c.writer[level] = levelWriter{level, c}
|
||||
c.logger[level] = log.New(c.writer[level], "", 0)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// flushCall is an in-flight or completed flush.
|
||||
type flushCall struct {
|
||||
donec chan struct{} // closed when response is in
|
||||
err error // error is valid after wg is Done
|
||||
}
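Since this is the client package the new gcplogs log driver builds on, a hedged usage sketch may help; the project ID, log name, and label values are placeholders.

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/cloud/logging"
)

func main() {
	ctx := context.Background()
	c, err := logging.NewClient(ctx, "my-project", "my-log") // placeholder project/log
	if err != nil {
		log.Fatal(err)
	}
	// Exported fields may be set before the first write.
	c.CommonLabels = map[string]string{"instance": "web-1"}

	// Verify credentials and connectivity before accepting writes.
	if err := c.Ping(); err != nil {
		log.Fatal(err)
	}

	// Buffered write: flushed within BufferInterval (1 second by default) or as
	// soon as FlushAfter (10 by default) entries are queued.
	if err := c.Log(logging.Entry{Level: logging.Info, Payload: "container started"}); err != nil {
		log.Print(err)
	}

	// Push anything still queued before exiting.
	if err := c.Flush(); err != nil {
		log.Print(err)
	}
}
```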
102 vendor/src/google.golang.org/cloud/option.go vendored Normal file
@@ -0,0 +1,102 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloud

import (
	"net/http"

	"golang.org/x/oauth2"
	"google.golang.org/cloud/internal/opts"
	"google.golang.org/grpc"
)

// ClientOption is used when construct clients for each cloud service.
type ClientOption interface {
	// Resolve configures the given DialOpts for this option.
	Resolve(*opts.DialOpt)
}

// WithTokenSource returns a ClientOption that specifies an OAuth2 token
// source to be used as the basis for authentication.
func WithTokenSource(s oauth2.TokenSource) ClientOption {
	return withTokenSource{s}
}

type withTokenSource struct{ ts oauth2.TokenSource }

func (w withTokenSource) Resolve(o *opts.DialOpt) {
	o.TokenSource = w.ts
}

// WithEndpoint returns a ClientOption that overrides the default endpoint
// to be used for a service.
func WithEndpoint(url string) ClientOption {
	return withEndpoint(url)
}

type withEndpoint string

func (w withEndpoint) Resolve(o *opts.DialOpt) {
	o.Endpoint = string(w)
}

// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
// to be used for a service.
func WithScopes(scope ...string) ClientOption {
	return withScopes(scope)
}

type withScopes []string

func (w withScopes) Resolve(o *opts.DialOpt) {
	s := make([]string, len(w))
	copy(s, w)
	o.Scopes = s
}

// WithUserAgent returns a ClientOption that sets the User-Agent.
func WithUserAgent(ua string) ClientOption {
	return withUA(ua)
}

type withUA string

func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) }

// WithBaseHTTP returns a ClientOption that specifies the HTTP client to
// use as the basis of communications. This option may only be used with
// services that support HTTP as their communication transport.
func WithBaseHTTP(client *http.Client) ClientOption {
	return withBaseHTTP{client}
}

type withBaseHTTP struct{ client *http.Client }

func (w withBaseHTTP) Resolve(o *opts.DialOpt) {
	o.HTTPClient = w.client
}

// WithBaseGRPC returns a ClientOption that specifies the GRPC client
// connection to use as the basis of communications. This option many only be
// used with services that support HRPC as their communication transport.
func WithBaseGRPC(client *grpc.ClientConn) ClientOption {
	return withBaseGRPC{client}
}

type withBaseGRPC struct{ client *grpc.ClientConn }

func (w withBaseGRPC) Resolve(o *opts.DialOpt) {
	o.GRPCClient = w.client
}
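How these options compose: each Resolve call fills one field of an internal opts.DialOpt, so later options simply overwrite earlier ones, which is how caller-supplied options can override a client package's defaults. A hedged sketch (values are illustrative, and opts is internal to google.golang.org/cloud):

```go
package cloudexample // sketch; must live under google.golang.org/cloud to import internal/opts

import (
	"google.golang.org/cloud"
	"google.golang.org/cloud/internal/opts"
)

// resolve applies options in order, last writer wins per field.
func resolve(options ...cloud.ClientOption) opts.DialOpt {
	var o opts.DialOpt
	for _, opt := range options {
		opt.Resolve(&o)
	}
	return o
}

// resolve(cloud.WithEndpoint("https://a.example/"), cloud.WithEndpoint("https://b.example/"))
// yields a DialOpt whose Endpoint is "https://b.example/".
```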
14 vendor/src/google.golang.org/grpc/.travis.yml vendored Normal file
@@ -0,0 +1,14 @@
language: go

before_install:
  - go get github.com/axw/gocov/gocov
  - go get github.com/mattn/goveralls
  - go get golang.org/x/tools/cmd/cover

install:
  - mkdir -p "$GOPATH/src/google.golang.org"
  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc"

script:
  - make test testrace
  - make coverage
23 vendor/src/google.golang.org/grpc/CONTRIBUTING.md vendored Normal file
@@ -0,0 +1,23 @@
# How to contribute

We definitely welcome patches and contributions to grpc! Here are some guidelines
and information about how to do so.

## Getting started

### Legal requirements

In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).

### Filing Issues
When filing an issue, make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

### Contributing code
Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
50 vendor/src/google.golang.org/grpc/Makefile vendored Normal file
@@ -0,0 +1,50 @@
.PHONY: \
	all \
	deps \
	updatedeps \
	testdeps \
	updatetestdeps \
	build \
	proto \
	test \
	testrace \
	clean \

all: test testrace

deps:
	go get -d -v google.golang.org/grpc/...

updatedeps:
	go get -d -v -u -f google.golang.org/grpc/...

testdeps:
	go get -d -v -t google.golang.org/grpc/...

updatetestdeps:
	go get -d -v -t -u -f google.golang.org/grpc/...

build: deps
	go build google.golang.org/grpc/...

proto:
	@ if ! which protoc > /dev/null; then \
		echo "error: protoc not installed" >&2; \
		exit 1; \
	fi
	go get -v github.com/golang/protobuf/protoc-gen-go
	for file in $$(git ls-files '*.proto'); do \
		protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \
	done

test: testdeps
	go test -v -cpu 1,4 google.golang.org/grpc/...

testrace: testdeps
	go test -v -race -cpu 1,4 google.golang.org/grpc/...

clean:
	go clean google.golang.org/grpc/...

coverage: testdeps
	goveralls -v google.golang.org/grpc/...
22 vendor/src/google.golang.org/grpc/PATENTS vendored Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the GRPC project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of GRPC, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of GRPC. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of GRPC or any code incorporated within this
implementation of GRPC constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of GRPC
shall terminate as of the date such litigation is filed.
32 vendor/src/google.golang.org/grpc/README.md vendored Normal file
@@ -0,0 +1,32 @@
# gRPC-Go

[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)

The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.

Installation
------------

To install this package, you need to install Go 1.4 or above and set up your Go workspace on your computer. The simplest way to install the library is to run:

```
$ go get google.golang.org/grpc
```

Prerequisites
-------------

This requires Go 1.4 or above.

Constraints
-----------
The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.

Documentation
-------------
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).

Status
------
Beta release
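A minimal, hedged dial sketch against the API vendored here; the address is a placeholder, and WithInsecure is needed because no transport credentials are configured (see ErrNoTransportSecurity in clientconn.go below).

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithTimeout(5*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Generated service stubs wrap conn and issue RPCs via grpc.Invoke.
}
```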
192 vendor/src/google.golang.org/grpc/call.go vendored Normal file
@@ -0,0 +1,192 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// recvResponse receives and parses an RPC response.
|
||||
// On error, it returns the error and indicates whether the call should be retried.
|
||||
//
|
||||
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||
func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
|
||||
// Try to acquire header metadata from the server if there is any.
|
||||
var err error
|
||||
c.headerMD, err = stream.Header()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p := &parser{s: stream}
|
||||
for {
|
||||
if err = recv(p, codec, reply); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.trailerMD = stream.Trailer()
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||
func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||
stream, err := t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
// TODO(zhaoq): Support compression.
|
||||
outBuf, err := encode(codec, args, compressionNone)
|
||||
if err != nil {
|
||||
return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
err = t.Write(stream, outBuf, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Sent successfully.
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
traceInfo traceInfo // in trace.go
|
||||
}
|
||||
|
||||
// Invoke is called by the generated code. It sends the RPC request on the
|
||||
// wire and returns after response is received.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
|
||||
var c callInfo
|
||||
for _, o := range opts {
|
||||
if err := o.before(&c); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
for _, o := range opts {
|
||||
o.after(&c)
|
||||
}
|
||||
}()
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
defer c.traceInfo.tr.Finish()
|
||||
c.traceInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
}
|
||||
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
|
||||
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
c.traceInfo.tr.SetError()
|
||||
}
|
||||
}()
|
||||
}
|
||||
topts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
var (
|
||||
lastErr error // record the error that happened
|
||||
)
|
||||
for {
|
||||
var (
|
||||
err error
|
||||
t transport.ClientTransport
|
||||
stream *transport.Stream
|
||||
)
|
||||
// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
|
||||
if lastErr != nil && c.failFast {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
t, err = cc.dopts.picker.Pick(ctx)
|
||||
if err != nil {
|
||||
if lastErr != nil {
|
||||
// This was a retry; return the error from the last attempt.
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||
}
|
||||
stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
if lastErr != nil {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
// Receive the response
|
||||
lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
|
||||
if _, ok := lastErr.(transport.ConnectionError); ok {
|
||||
continue
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
|
||||
}
|
||||
t.CloseStream(stream, lastErr)
|
||||
if lastErr != nil {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
return Errorf(stream.StatusCode(), stream.StatusDesc())
|
||||
}
|
||||
}
|
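Generated unary stubs reduce to a single grpc.Invoke call against an established ClientConn. A hedged sketch; the pb package and the helloworld.Greeter service are hypothetical.

```go
package example // sketch; the pb package and Greeter service are hypothetical

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "example.com/helloworld/pb" // hypothetical generated protobuf package
)

// sayHello performs one unary RPC the same way a generated stub would.
func sayHello(ctx context.Context, cc *grpc.ClientConn, name string) (*pb.HelloReply, error) {
	out := new(pb.HelloReply)
	err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", &pb.HelloRequest{Name: name}, out, cc)
	if err != nil {
		return nil, err
	}
	return out, nil
}
```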
525 vendor/src/google.golang.org/grpc/clientconn.go vendored Normal file
@@ -0,0 +1,525 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrUnspecTarget indicates that the target address is unspecified.
|
||||
ErrUnspecTarget = errors.New("grpc: target is unspecified")
|
||||
// ErrNoTransportSecurity indicates that there is no transport security
|
||||
// being set for ClientConn. Users should either set one or explicityly
|
||||
// call WithInsecure DialOption to disable security.
|
||||
ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
|
||||
// ErrCredentialsMisuse indicates that users want to transmit security infomation
|
||||
// (e.g., oauth2 token) which requires secure connection on an insecure
|
||||
// connection.
|
||||
ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
|
||||
// ErrClientConnClosing indicates that the operation is illegal because
|
||||
// the session is closing.
|
||||
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
|
||||
// ErrClientConnTimeout indicates that the connection could not be
|
||||
// established or re-established within the specified timeout.
|
||||
ErrClientConnTimeout = errors.New("grpc: timed out trying to connect")
|
||||
// minimum time to give a connection to complete
|
||||
minConnectTimeout = 20 * time.Second
|
||||
)
|
||||
|
||||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||
// values passed to Dial.
|
||||
type dialOptions struct {
|
||||
codec Codec
|
||||
picker Picker
|
||||
block bool
|
||||
insecure bool
|
||||
copts transport.ConnectOptions
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
type DialOption func(*dialOptions)
|
||||
|
||||
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
|
||||
func WithCodec(c Codec) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.codec = c
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying
|
||||
// connection is up. Without this, Dial returns immediately and connecting the server
|
||||
// happens in background.
|
||||
func WithBlock() DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.block = true
|
||||
}
|
||||
}
|
||||
|
||||
func WithInsecure() DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.insecure = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithTransportCredentials returns a DialOption which configures a
|
||||
// connection level security credentials (e.g., TLS/SSL).
|
||||
func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
|
||||
}
|
||||
}
|
||||
|
||||
// WithPerRPCCredentials returns a DialOption which sets
|
||||
// credentials which will place auth state on each outbound RPC.
|
||||
func WithPerRPCCredentials(creds credentials.Credentials) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout returns a DialOption that configures a timeout for dialing a client connection.
|
||||
func WithTimeout(d time.Duration) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.Timeout = d
|
||||
}
|
||||
}
|
||||
|
||||
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
|
||||
func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.Dialer = f
|
||||
}
|
||||
}
|
||||
|
||||
// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
|
||||
func WithUserAgent(s string) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.UserAgent = s
|
||||
}
|
||||
}
|
||||
|
||||
// Dial creates a client connection the given target.
|
||||
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
||||
cc := &ClientConn{
|
||||
target: target,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(&cc.dopts)
|
||||
}
|
||||
if cc.dopts.codec == nil {
|
||||
// Set the default codec.
|
||||
cc.dopts.codec = protoCodec{}
|
||||
}
|
||||
if cc.dopts.picker == nil {
|
||||
cc.dopts.picker = &unicastPicker{}
|
||||
}
|
||||
if err := cc.dopts.picker.Init(cc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
colonPos := strings.LastIndex(target, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(target)
|
||||
}
|
||||
cc.authority = target[:colonPos]
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
// ConnectivityState indicates the state of a client connection.
|
||||
type ConnectivityState int
|
||||
|
||||
const (
|
||||
// Idle indicates the ClientConn is idle.
|
||||
Idle ConnectivityState = iota
|
||||
// Connecting indicates the ClienConn is connecting.
|
||||
Connecting
|
||||
// Ready indicates the ClientConn is ready for work.
|
||||
Ready
|
||||
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
|
||||
TransientFailure
|
||||
// Shutdown indicates the ClientConn has started shutting down.
|
||||
Shutdown
|
||||
)
|
||||
|
||||
func (s ConnectivityState) String() string {
|
||||
switch s {
|
||||
case Idle:
|
||||
return "IDLE"
|
||||
case Connecting:
|
||||
return "CONNECTING"
|
||||
case Ready:
|
||||
return "READY"
|
||||
case TransientFailure:
|
||||
return "TRANSIENT_FAILURE"
|
||||
case Shutdown:
|
||||
return "SHUTDOWN"
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown connectivity state: %d", s))
|
||||
}
|
||||
}
|
||||
|
||||
// ClientConn represents a client connection to an RPC service.
|
||||
type ClientConn struct {
|
||||
target string
|
||||
authority string
|
||||
dopts dialOptions
|
||||
}
|
||||
|
||||
// State returns the connectivity state of cc.
|
||||
// This is EXPERIMENTAL API.
|
||||
func (cc *ClientConn) State() ConnectivityState {
|
||||
return cc.dopts.picker.State()
|
||||
}
|
||||
|
||||
// WaitForStateChange blocks until the state changes to something other than the sourceState
|
||||
// or timeout fires on cc. It returns false if timeout fires, and true otherwise.
|
||||
// This is EXPERIMENTAL API.
|
||||
func (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {
|
||||
return cc.dopts.picker.WaitForStateChange(timeout, sourceState)
|
||||
}
|
||||
|
||||
// Close starts to tear down the ClientConn.
|
||||
func (cc *ClientConn) Close() error {
|
||||
return cc.dopts.picker.Close()
|
||||
}
|
||||
|
||||
// Conn is a client connection to a single destination.
|
||||
type Conn struct {
|
||||
target string
|
||||
dopts dialOptions
|
||||
shutdownChan chan struct{}
|
||||
events trace.EventLog
|
||||
|
||||
mu sync.Mutex
|
||||
state ConnectivityState
|
||||
stateCV *sync.Cond
|
||||
// ready is closed and becomes nil when a new transport is up or failed
|
||||
// due to timeout.
|
||||
ready chan struct{}
|
||||
transport transport.ClientTransport
|
||||
}
|
||||
|
||||
// NewConn creates a Conn.
|
||||
func NewConn(cc *ClientConn) (*Conn, error) {
|
||||
if cc.target == "" {
|
||||
return nil, ErrUnspecTarget
|
||||
}
|
||||
c := &Conn{
|
||||
target: cc.target,
|
||||
dopts: cc.dopts,
|
||||
shutdownChan: make(chan struct{}),
|
||||
}
|
||||
if EnableTracing {
|
||||
c.events = trace.NewEventLog("grpc.ClientConn", c.target)
|
||||
}
|
||||
if !c.dopts.insecure {
|
||||
var ok bool
|
||||
for _, cd := range c.dopts.copts.AuthOptions {
|
||||
if _, ok := cd.(credentials.TransportAuthenticator); !ok {
|
||||
continue
|
||||
}
|
||||
ok = true
|
||||
}
|
||||
if !ok {
|
||||
return nil, ErrNoTransportSecurity
|
||||
}
|
||||
} else {
|
||||
for _, cd := range c.dopts.copts.AuthOptions {
|
||||
if cd.RequireTransportSecurity() {
|
||||
return nil, ErrCredentialsMisuse
|
||||
}
|
||||
}
|
||||
}
|
||||
c.stateCV = sync.NewCond(&c.mu)
|
||||
if c.dopts.block {
|
||||
if err := c.resetTransport(false); err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
// Start to monitor the error status of transport.
|
||||
go c.transportMonitor()
|
||||
} else {
|
||||
// Start a goroutine connecting to the server asynchronously.
|
||||
go func() {
|
||||
if err := c.resetTransport(false); err != nil {
|
||||
grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err)
|
||||
c.Close()
|
||||
return
|
||||
}
|
||||
c.transportMonitor()
|
||||
}()
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// printf records an event in cc's event log, unless cc has been closed.
|
||||
// REQUIRES cc.mu is held.
|
||||
func (cc *Conn) printf(format string, a ...interface{}) {
|
||||
if cc.events != nil {
|
||||
cc.events.Printf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// errorf records an error in cc's event log, unless cc has been closed.
|
||||
// REQUIRES cc.mu is held.
|
||||
func (cc *Conn) errorf(format string, a ...interface{}) {
|
||||
if cc.events != nil {
|
||||
cc.events.Errorf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// State returns the connectivity state of the Conn
|
||||
func (cc *Conn) State() ConnectivityState {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
return cc.state
|
||||
}
|
||||
|
||||
// WaitForStateChange blocks until the state changes to something other than the sourceState
|
||||
// or timeout fires. It returns false if timeout fires and true otherwise.
|
||||
// TODO(zhaoq): Rewrite for complex Picker.
|
||||
func (cc *Conn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {
|
||||
start := time.Now()
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
if sourceState != cc.state {
|
||||
return true
|
||||
}
|
||||
expired := timeout <= time.Since(start)
|
||||
if expired {
|
||||
return false
|
||||
}
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-time.After(timeout - time.Since(start)):
|
||||
cc.mu.Lock()
|
||||
expired = true
|
||||
cc.stateCV.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
case <-done:
|
||||
}
|
||||
}()
|
||||
defer close(done)
|
||||
for sourceState == cc.state {
|
||||
cc.stateCV.Wait()
|
||||
if expired {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (cc *Conn) resetTransport(closeTransport bool) error {
|
||||
var retries int
|
||||
start := time.Now()
|
||||
for {
|
||||
cc.mu.Lock()
|
||||
cc.printf("connecting")
|
||||
if cc.state == Shutdown {
|
||||
cc.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
cc.state = Connecting
|
||||
cc.stateCV.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
if closeTransport {
|
||||
cc.transport.Close()
|
||||
}
|
||||
// Adjust timeout for the current try.
|
||||
copts := cc.dopts.copts
|
||||
if copts.Timeout < 0 {
|
||||
cc.Close()
|
||||
return ErrClientConnTimeout
|
||||
}
|
||||
if copts.Timeout > 0 {
|
||||
copts.Timeout -= time.Since(start)
|
||||
if copts.Timeout <= 0 {
|
||||
cc.Close()
|
||||
return ErrClientConnTimeout
|
||||
}
|
||||
}
|
||||
sleepTime := backoff(retries)
|
||||
timeout := sleepTime
|
||||
if timeout < minConnectTimeout {
|
||||
timeout = minConnectTimeout
|
||||
}
|
||||
if copts.Timeout == 0 || copts.Timeout > timeout {
|
||||
copts.Timeout = timeout
|
||||
}
|
||||
connectTime := time.Now()
|
||||
newTransport, err := transport.NewClientTransport(cc.target, &copts)
|
||||
if err != nil {
|
||||
cc.mu.Lock()
|
||||
cc.errorf("transient failure: %v", err)
|
||||
cc.state = TransientFailure
|
||||
cc.stateCV.Broadcast()
|
||||
if cc.ready != nil {
|
||||
close(cc.ready)
|
||||
cc.ready = nil
|
||||
}
|
||||
cc.mu.Unlock()
|
||||
sleepTime -= time.Since(connectTime)
|
||||
if sleepTime < 0 {
|
||||
sleepTime = 0
|
||||
}
|
||||
// Fail early before falling into sleep.
|
||||
if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {
|
||||
cc.mu.Lock()
|
||||
cc.errorf("connection timeout")
|
||||
cc.mu.Unlock()
|
||||
cc.Close()
|
||||
return ErrClientConnTimeout
|
||||
}
|
||||
closeTransport = false
|
||||
time.Sleep(sleepTime)
|
||||
retries++
|
||||
grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target)
|
||||
continue
|
||||
}
|
||||
cc.mu.Lock()
|
||||
cc.printf("ready")
|
||||
if cc.state == Shutdown {
|
||||
// cc.Close() has been invoked.
|
||||
cc.mu.Unlock()
|
||||
newTransport.Close()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
cc.state = Ready
|
||||
cc.stateCV.Broadcast()
|
||||
cc.transport = newTransport
|
||||
if cc.ready != nil {
|
||||
close(cc.ready)
|
||||
cc.ready = nil
|
||||
}
|
||||
cc.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Run in a goroutine to track the error in transport and create the
|
||||
// new transport if an error happens. It returns when the channel is closing.
|
||||
func (cc *Conn) transportMonitor() {
|
||||
for {
|
||||
select {
|
||||
// shutdownChan is needed to detect the teardown when
|
||||
// the ClientConn is idle (i.e., no RPC in flight).
|
||||
case <-cc.shutdownChan:
|
||||
return
|
||||
case <-cc.transport.Error():
|
||||
cc.mu.Lock()
|
||||
cc.state = TransientFailure
|
||||
cc.stateCV.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
if err := cc.resetTransport(true); err != nil {
|
||||
// The ClientConn is closing.
|
||||
cc.mu.Lock()
|
||||
cc.printf("transport exiting: %v", err)
|
||||
cc.mu.Unlock()
|
||||
grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
|
||||
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
|
||||
for {
|
||||
cc.mu.Lock()
|
||||
switch {
|
||||
case cc.state == Shutdown:
|
||||
cc.mu.Unlock()
|
||||
return nil, ErrClientConnClosing
|
||||
case cc.state == Ready:
|
||||
cc.mu.Unlock()
|
||||
return cc.transport, nil
|
||||
default:
|
||||
ready := cc.ready
|
||||
if ready == nil {
|
||||
ready = make(chan struct{})
|
||||
cc.ready = ready
|
||||
}
|
||||
cc.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, transport.ContextErr(ctx.Err())
|
||||
// Wait until the new transport is ready or failed.
|
||||
case <-ready:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close starts to tear down the Conn. Returns ErrClientConnClosing if
|
||||
// it has been closed (mostly due to dial time-out).
|
||||
// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
|
||||
// some edge cases (e.g., the caller opens and closes many ClientConn's in a
|
||||
// tight loop.
|
||||
func (cc *Conn) Close() error {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
if cc.state == Shutdown {
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
cc.state = Shutdown
|
||||
cc.stateCV.Broadcast()
|
||||
if cc.events != nil {
|
||||
cc.events.Finish()
|
||||
cc.events = nil
|
||||
}
|
||||
if cc.ready != nil {
|
||||
close(cc.ready)
|
||||
cc.ready = nil
|
||||
}
|
||||
if cc.transport != nil {
|
||||
cc.transport.Close()
|
||||
}
|
||||
if cc.shutdownChan != nil {
|
||||
close(cc.shutdownChan)
|
||||
}
|
||||
return nil
|
||||
}
|
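The State/WaitForStateChange pair (marked EXPERIMENTAL above) can be used to block until a connection becomes usable; a hedged sketch:

```go
package example

import (
	"time"

	"google.golang.org/grpc"
)

// waitReady polls cc until it is Ready, giving up on Shutdown or when a
// 10-second wait passes without any state change.
func waitReady(cc *grpc.ClientConn) bool {
	for {
		switch s := cc.State(); s {
		case grpc.Ready:
			return true
		case grpc.Shutdown:
			return false
		default:
			if !cc.WaitForStateChange(10*time.Second, s) {
				return false // timed out while still in state s
			}
		}
	}
}
```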
17 vendor/src/google.golang.org/grpc/codegen.sh vendored Executable file
@@ -0,0 +1,17 @@
#!/bin/bash

# This script serves as an example to demonstrate how to generate the gRPC-Go
# interface and the related messages from .proto file.
#
# It assumes the installation of i) Google proto buffer compiler at
# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
# not, please install them first.
#
# We recommend running this script at $GOPATH/src.
#
# If this is not what you need, feel free to make your own scripts. Again, this
# script is for demonstration purpose.
#
proto=$1
protoc --go_out=plugins=grpc:. $proto
16 vendor/src/google.golang.org/grpc/codes/code_string.go vendored Normal file
@@ -0,0 +1,16 @@
// generated by stringer -type=Code; DO NOT EDIT

package codes

import "fmt"

const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"

var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}

func (i Code) String() string {
	if i+1 >= Code(len(_Code_index)) {
		return fmt.Sprintf("Code(%d)", i)
	}
	return _Code_name[_Code_index[i]:_Code_index[i+1]]
}
159 vendor/src/google.golang.org/grpc/codes/codes.go vendored Normal file
@@ -0,0 +1,159 @@
/*
 *
 * Copyright 2014, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

// Package codes defines the canonical error codes used by gRPC. It is
// consistent across various languages.
package codes // import "google.golang.org/grpc/codes"

// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
type Code uint32

//go:generate stringer -type=Code

const (
	// OK is returned on success.
	OK Code = 0

	// Canceled indicates the operation was cancelled (typically by the caller).
	Canceled Code = 1

	// Unknown error. An example of where this error may be returned is
	// if a Status value received from another address space belongs to
	// an error-space that is not known in this address space. Also
	// errors raised by APIs that do not return enough error information
	// may be converted to this error.
	Unknown Code = 2

	// InvalidArgument indicates client specified an invalid argument.
	// Note that this differs from FailedPrecondition. It indicates arguments
	// that are problematic regardless of the state of the system
	// (e.g., a malformed file name).
	InvalidArgument Code = 3

	// DeadlineExceeded means operation expired before completion.
	// For operations that change the state of the system, this error may be
	// returned even if the operation has completed successfully. For
	// example, a successful response from a server could have been delayed
	// long enough for the deadline to expire.
	DeadlineExceeded Code = 4

	// NotFound means some requested entity (e.g., file or directory) was
	// not found.
	NotFound Code = 5

	// AlreadyExists means an attempt to create an entity failed because one
	// already exists.
	AlreadyExists Code = 6

	// PermissionDenied indicates the caller does not have permission to
	// execute the specified operation. It must not be used for rejections
	// caused by exhausting some resource (use ResourceExhausted
	// instead for those errors). It must not be
	// used if the caller cannot be identified (use Unauthenticated
	// instead for those errors).
	PermissionDenied Code = 7

	// Unauthenticated indicates the request does not have valid
	// authentication credentials for the operation.
	Unauthenticated Code = 16

	// ResourceExhausted indicates some resource has been exhausted, perhaps
	// a per-user quota, or perhaps the entire file system is out of space.
	ResourceExhausted Code = 8

	// FailedPrecondition indicates operation was rejected because the
	// system is not in a state required for the operation's execution.
	// For example, directory to be deleted may be non-empty, an rmdir
	// operation is applied to a non-directory, etc.
	//
	// A litmus test that may help a service implementor in deciding
	// between FailedPrecondition, Aborted, and Unavailable:
	//  (a) Use Unavailable if the client can retry just the failing call.
	//  (b) Use Aborted if the client should retry at a higher-level
	//      (e.g., restarting a read-modify-write sequence).
	//  (c) Use FailedPrecondition if the client should not retry until
	//      the system state has been explicitly fixed. E.g., if an "rmdir"
	//      fails because the directory is non-empty, FailedPrecondition
	//      should be returned since the client should not retry unless
	//      they have first fixed up the directory by deleting files from it.
	//  (d) Use FailedPrecondition if the client performs conditional
	//      REST Get/Update/Delete on a resource and the resource on the
	//      server does not match the condition. E.g., conflicting
	//      read-modify-write on the same resource.
	FailedPrecondition Code = 9

	// Aborted indicates the operation was aborted, typically due to a
	// concurrency issue like sequencer check failures, transaction aborts,
	// etc.
	//
	// See litmus test above for deciding between FailedPrecondition,
	// Aborted, and Unavailable.
	Aborted Code = 10

	// OutOfRange means operation was attempted past the valid range.
	// E.g., seeking or reading past end of file.
	//
	// Unlike InvalidArgument, this error indicates a problem that may
	// be fixed if the system state changes. For example, a 32-bit file
	// system will generate InvalidArgument if asked to read at an
	// offset that is not in the range [0,2^32-1], but it will generate
	// OutOfRange if asked to read from an offset past the current
	// file size.
	//
	// There is a fair bit of overlap between FailedPrecondition and
	// OutOfRange. We recommend using OutOfRange (the more specific
	// error) when it applies so that callers who are iterating through
	// a space can easily look for an OutOfRange error to detect when
	// they are done.
	OutOfRange Code = 11

	// Unimplemented indicates operation is not implemented or not
	// supported/enabled in this service.
	Unimplemented Code = 12

	// Internal errors. Means some invariants expected by underlying
	// system has been broken. If you see one of these errors,
	// something is very broken.
	Internal Code = 13

	// Unavailable indicates the service is currently unavailable.
	// This is a most likely a transient condition and may be corrected
	// by retrying with a backoff.
	//
	// See litmus test above for deciding between FailedPrecondition,
	// Aborted, and Unavailable.
	Unavailable Code = 14

	// DataLoss indicates unrecoverable data loss or corruption.
	DataLoss Code = 15
)
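
The comments above spell out a retry litmus test for FailedPrecondition, Aborted, and Unavailable. A minimal sketch of turning that guidance into code; it is not part of this diff and only assumes the vendored google.golang.org/grpc/codes package is importable:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

// retryable applies the litmus test from codes.go: Unavailable means retry the
// same call with backoff, Aborted means retry the whole higher-level sequence,
// and FailedPrecondition means do not retry until the system state is fixed.
func retryable(c codes.Code) string {
	switch c {
	case codes.Unavailable:
		return "retry this call with backoff"
	case codes.Aborted:
		return "retry the higher-level operation"
	case codes.FailedPrecondition:
		return "do not retry; fix system state first"
	default:
		return "no generic retry guidance"
	}
}

func main() {
	for _, c := range []codes.Code{codes.Unavailable, codes.Aborted, codes.FailedPrecondition, codes.NotFound} {
		// Code's String method comes from the generated code_string.go above.
		fmt.Printf("%s: %s\n", c, retryable(c))
	}
}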
239 vendor/src/google.golang.org/grpc/credentials/credentials.go vendored Normal file
@@ -0,0 +1,239 @@
/*
 *
 * Copyright 2014, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

// Package credentials implements various credentials supported by gRPC library,
// which encapsulate all the state needed by a client to authenticate with a
// server and make various assertions, e.g., about the client's identity, role,
// or whether it is authorized to make a particular call.
package credentials // import "google.golang.org/grpc/credentials"

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net"
	"strings"
	"time"

	"golang.org/x/net/context"
)

var (
	// alpnProtoStr are the specified application level protocols for gRPC.
	alpnProtoStr = []string{"h2"}
)

// Credentials defines the common interface all supported credentials must
// implement.
type Credentials interface {
	// GetRequestMetadata gets the current request metadata, refreshing
	// tokens if required. This should be called by the transport layer on
	// each request, and the data should be populated in headers or other
	// context. uri is the URI of the entry point for the request. When
	// supported by the underlying implementation, ctx can be used for
	// timeout and cancellation.
	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
	// it as an arbitrary string.
	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
	// RequireTransportSecurity indicates whether the credentails requires
	// transport security.
	RequireTransportSecurity() bool
}

// ProtocolInfo provides information regarding the gRPC wire protocol version,
// security protocol, security protocol version in use, etc.
type ProtocolInfo struct {
	// ProtocolVersion is the gRPC wire protocol version.
	ProtocolVersion string
	// SecurityProtocol is the security protocol in use.
	SecurityProtocol string
	// SecurityVersion is the security protocol version.
	SecurityVersion string
}

// AuthInfo defines the common interface for the auth information the users are interested in.
type AuthInfo interface {
	AuthType() string
}

type authInfoKey struct{}

// NewContext creates a new context with authInfo attached.
func NewContext(ctx context.Context, authInfo AuthInfo) context.Context {
	return context.WithValue(ctx, authInfoKey{}, authInfo)
}

// FromContext returns the authInfo in ctx if it exists.
func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) {
	authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo)
	return
}

// TransportAuthenticator defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportAuthenticator interface {
	// ClientHandshake does the authentication handshake specified by the corresponding
	// authentication protocol on rawConn for clients. It returns the authenticated
	// connection and the corresponding auth information about the connection.
	ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
	// ServerHandshake does the authentication handshake for servers. It returns
	// the authenticated connection and the corresponding auth information about
	// the connection.
	ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
	// Info provides the ProtocolInfo of this TransportAuthenticator.
	Info() ProtocolInfo
	Credentials
}

// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
	State tls.ConnectionState
}

func (t TLSInfo) AuthType() string {
	return "tls"
}

// tlsCreds is the credentials required for authenticating a connection using TLS.
type tlsCreds struct {
	// TLS configuration
	config tls.Config
}

func (c tlsCreds) Info() ProtocolInfo {
	return ProtocolInfo{
		SecurityProtocol: "tls",
		SecurityVersion:  "1.2",
	}
}

// GetRequestMetadata returns nil, nil since TLS credentials does not have
// metadata.
func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return nil, nil
}

func (c *tlsCreds) RequireTransportSecurity() bool {
	return true
}

type timeoutError struct{}

func (timeoutError) Error() string   { return "credentials: Dial timed out" }
func (timeoutError) Timeout() bool   { return true }
func (timeoutError) Temporary() bool { return true }

func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
	// borrow some code from tls.DialWithDialer
	var errChannel chan error
	if timeout != 0 {
		errChannel = make(chan error, 2)
		time.AfterFunc(timeout, func() {
			errChannel <- timeoutError{}
		})
	}
	if c.config.ServerName == "" {
		colonPos := strings.LastIndex(addr, ":")
		if colonPos == -1 {
			colonPos = len(addr)
		}
		c.config.ServerName = addr[:colonPos]
	}
	conn := tls.Client(rawConn, &c.config)
	if timeout == 0 {
		err = conn.Handshake()
	} else {
		go func() {
			errChannel <- conn.Handshake()
		}()
		err = <-errChannel
	}
	if err != nil {
		rawConn.Close()
		return nil, nil, err
	}
	// TODO(zhaoq): Omit the auth info for client now. It is more for
	// information than anything else.
	return conn, nil, nil
}

func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
	conn := tls.Server(rawConn, &c.config)
	if err := conn.Handshake(); err != nil {
		rawConn.Close()
		return nil, nil, err
	}
	return conn, TLSInfo{conn.ConnectionState()}, nil
}

// NewTLS uses c to construct a TransportAuthenticator based on TLS.
func NewTLS(c *tls.Config) TransportAuthenticator {
	tc := &tlsCreds{*c}
	tc.config.NextProtos = alpnProtoStr
	return tc
}

// NewClientTLSFromCert constructs a TLS from the input certificate for client.
func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator {
	return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
}

// NewClientTLSFromFile constructs a TLS from the input certificate file for client.
func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) {
	b, err := ioutil.ReadFile(certFile)
	if err != nil {
		return nil, err
	}
	cp := x509.NewCertPool()
	if !cp.AppendCertsFromPEM(b) {
		return nil, fmt.Errorf("credentials: failed to append certificates")
	}
	return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
}

// NewServerTLSFromCert constructs a TLS from the input certificate for server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator {
	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
}

// NewServerTLSFromFile constructs a TLS from the input certificate file and key
// file for server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
}
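
A minimal client-side sketch of how these constructors are typically wired together. It is not part of this diff; it assumes the grpc.Dial and grpc.WithTransportCredentials helpers from the same vendored grpc package, and the CA file path and server name are placeholders:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Build a TransportAuthenticator from a CA certificate file (placeholder path).
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "my-server.example.com")
	if err != nil {
		log.Fatalf("failed to load TLS credentials: %v", err)
	}
	// Dial with TLS; the handshake is performed by tlsCreds.ClientHandshake above.
	conn, err := grpc.Dial("my-server.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}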
177 vendor/src/google.golang.org/grpc/credentials/oauth/oauth.go vendored Normal file
@@ -0,0 +1,177 @@
/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

// Package oauth implements gRPC credentials using OAuth.
package oauth

import (
	"fmt"
	"io/ioutil"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"golang.org/x/oauth2/jwt"
	"google.golang.org/grpc/credentials"
)

// TokenSource supplies credentials from an oauth2.TokenSource.
type TokenSource struct {
	oauth2.TokenSource
}

// GetRequestMetadata gets the request metadata as a map from a TokenSource.
func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	token, err := ts.Token()
	if err != nil {
		return nil, err
	}
	return map[string]string{
		"authorization": token.TokenType + " " + token.AccessToken,
	}, nil
}

func (ts TokenSource) RequireTransportSecurity() bool {
	return true
}

type jwtAccess struct {
	jsonKey []byte
}

func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) {
	jsonKey, err := ioutil.ReadFile(keyFile)
	if err != nil {
		return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
	}
	return NewJWTAccessFromKey(jsonKey)
}

func NewJWTAccessFromKey(jsonKey []byte) (credentials.Credentials, error) {
	return jwtAccess{jsonKey}, nil
}

func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0])
	if err != nil {
		return nil, err
	}
	token, err := ts.Token()
	if err != nil {
		return nil, err
	}
	return map[string]string{
		"authorization": token.TokenType + " " + token.AccessToken,
	}, nil
}

func (j jwtAccess) RequireTransportSecurity() bool {
	return true
}

// oauthAccess supplies credentials from a given token.
type oauthAccess struct {
	token oauth2.Token
}

// NewOauthAccess constructs the credentials using a given token.
func NewOauthAccess(token *oauth2.Token) credentials.Credentials {
	return oauthAccess{token: *token}
}

func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{
		"authorization": oa.token.TokenType + " " + oa.token.AccessToken,
	}, nil
}

func (oa oauthAccess) RequireTransportSecurity() bool {
	return true
}

// NewComputeEngine constructs the credentials that fetches access tokens from
// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
// if your program is running on a GCE instance.
// TODO(dsymonds): Deprecate and remove this.
func NewComputeEngine() credentials.Credentials {
	return TokenSource{google.ComputeTokenSource("")}
}

// serviceAccount represents credentials via JWT signing key.
type serviceAccount struct {
	config *jwt.Config
}

func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	token, err := s.config.TokenSource(ctx).Token()
	if err != nil {
		return nil, err
	}
	return map[string]string{
		"authorization": token.TokenType + " " + token.AccessToken,
	}, nil
}

func (s serviceAccount) RequireTransportSecurity() bool {
	return true
}

// NewServiceAccountFromKey constructs the credentials using the JSON key slice
// from a Google Developers service account.
func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Credentials, error) {
	config, err := google.JWTConfigFromJSON(jsonKey, scope...)
	if err != nil {
		return nil, err
	}
	return serviceAccount{config: config}, nil
}

// NewServiceAccountFromFile constructs the credentials using the JSON key file
// of a Google Developers service account.
func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Credentials, error) {
	jsonKey, err := ioutil.ReadFile(keyFile)
	if err != nil {
		return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
	}
	return NewServiceAccountFromKey(jsonKey, scope...)
}

// NewApplicationDefault returns "Application Default Credentials". For more
// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.Credentials, error) {
	t, err := google.DefaultTokenSource(ctx, scope...)
	if err != nil {
		return nil, err
	}
	return TokenSource{t}, nil
}
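
A hedged sketch of combining per-RPC OAuth credentials with transport security, which RequireTransportSecurity above mandates. It is not part of this diff; it assumes the grpc.WithPerRPCCredentials and grpc.WithTransportCredentials dial options from the same vendored package, and the key file path, scope, and endpoint are placeholders:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Per-RPC OAuth credentials from a service account key file (placeholder path and scope).
	rpcCreds, err := oauth.NewServiceAccountFromFile("key.json", "https://www.googleapis.com/auth/logging.write")
	if err != nil {
		log.Fatalf("failed to create OAuth credentials: %v", err)
	}
	// RequireTransportSecurity returns true, so transport credentials are needed too;
	// a nil CertPool falls back to the host's root CAs.
	tlsCreds := credentials.NewClientTLSFromCert(nil, "logging.googleapis.com")
	conn, err := grpc.Dial("logging.googleapis.com:443",
		grpc.WithTransportCredentials(tlsCreds),
		grpc.WithPerRPCCredentials(rpcCreds))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}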
6 vendor/src/google.golang.org/grpc/doc.go vendored Normal file
@@ -0,0 +1,6 @@
/*
Package grpc implements an RPC system called gRPC.

See https://github.com/grpc/grpc for more information about gRPC.
*/
package grpc // import "google.golang.org/grpc"
90 vendor/src/google.golang.org/grpc/grpclog/logger.go vendored Normal file
@@ -0,0 +1,90 @@
/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
Package grpclog defines logging for grpc.
*/
package grpclog // import "google.golang.org/grpc/grpclog"

import (
	"log"
	"os"
)

// Use golang's standard logger by default.
var logger Logger = log.New(os.Stderr, "", log.LstdFlags)

// Logger mimics golang's standard Logger as an interface.
type Logger interface {
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})
	Print(args ...interface{})
	Printf(format string, args ...interface{})
	Println(args ...interface{})
}

// SetLogger sets the logger that is used in grpc.
func SetLogger(l Logger) {
	logger = l
}

// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
func Fatal(args ...interface{}) {
	logger.Fatal(args...)
}

// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
func Fatalf(format string, args ...interface{}) {
	logger.Fatalf(format, args...)
}

// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code.
func Fatalln(args ...interface{}) {
	logger.Fatalln(args...)
}

// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
func Print(args ...interface{}) {
	logger.Print(args...)
}

// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
func Printf(format string, args ...interface{}) {
	logger.Printf(format, args...)
}

// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
func Println(args ...interface{}) {
	logger.Println(args...)
}
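
Because logger is a package-level variable typed as the Logger interface, any *log.Logger from the standard library already satisfies it. A small sketch, not part of this diff, of routing gRPC's internal logging through a prefixed logger:

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Replace the default logger with one that prefixes every line with "[grpc]".
	grpclog.SetLogger(log.New(os.Stderr, "[grpc] ", log.LstdFlags))
	grpclog.Printf("transport established to %s", "logging.googleapis.com:443")
}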