From d17b9f3da064f4c7e70867be5e64d06b127df301 Mon Sep 17 00:00:00 2001
From: Michael Crosby
Date: Tue, 7 Jun 2016 14:30:43 -0700
Subject: [PATCH] Update containerd to cf554d59dd96e459544748290eb91

This bumps containerd to cf554d59dd96e459544748290eb9167f4bcde509, which
includes various fixes, and updates the vendored grpc package and the
generated containerd API types.

Signed-off-by: Michael Crosby
---
 Dockerfile | 2 +-
 Dockerfile.aarch64 | 2 +-
 Dockerfile.armhf | 2 +-
 Dockerfile.gccgo | 2 +-
 Dockerfile.ppc64le | 2 +-
 Dockerfile.s390x | 2 +-
 Dockerfile.simple | 2 +-
 hack/vendor.sh | 8 +-
 .../containerd/api/grpc/types/api.pb.go | 597 +++---
 .../containerd/api/grpc/types/api.proto | 7 +
 .../github.com/golang/protobuf/proto/Makefile | 2 +-
 .../golang/protobuf/proto/decode.go | 9 +-
 .../golang/protobuf/proto/encode.go | 8 +-
 .../github.com/golang/protobuf/proto/text.go | 133 +-
 .../golang/protobuf/proto/text_parser.go | 97 +-
 .../src/golang.org/x/net/context/context.go | 14 +-
 .../x/net/context/ctxhttp/cancelreq.go | 1 +
 .../x/net/context/ctxhttp/ctxhttp.go | 68 +-
 vendor/src/golang.org/x/net/http2/Dockerfile | 15 +-
 vendor/src/golang.org/x/net/http2/buffer.go | 76 -
 .../x/net/http2/client_conn_pool.go | 225 +++
 .../x/net/http2/configure_transport.go | 89 +
 vendor/src/golang.org/x/net/http2/errors.go | 54 +-
 .../golang.org/x/net/http2/fixed_buffer.go | 60 +
 vendor/src/golang.org/x/net/http2/flow.go | 7 +-
 vendor/src/golang.org/x/net/http2/frame.go | 431 ++++-
 vendor/src/golang.org/x/net/http2/go15.go | 11 +
 vendor/src/golang.org/x/net/http2/gotrack.go | 3 -
 .../src/golang.org/x/net/http2/headermap.go | 4 +-
 .../golang.org/x/net/http2/hpack/encode.go | 9 +-
 .../src/golang.org/x/net/http2/hpack/hpack.go | 155 +-
 .../golang.org/x/net/http2/hpack/huffman.go | 49 +-
 .../golang.org/x/net/http2/hpack/tables.go | 13 +-
 vendor/src/golang.org/x/net/http2/http2.go | 249 ++-
 vendor/src/golang.org/x/net/http2/not_go15.go | 11 +
 vendor/src/golang.org/x/net/http2/not_go16.go | 13 +
 vendor/src/golang.org/x/net/http2/pipe.go | 150 +-
 vendor/src/golang.org/x/net/http2/server.go | 1116 +++++++----
 .../src/golang.org/x/net/http2/transport.go | 1689 ++++++++++++++---
 vendor/src/golang.org/x/net/http2/write.go | 98 +-
 .../src/golang.org/x/net/http2/writesched.go | 3 -
 vendor/src/golang.org/x/net/trace/trace.go | 15 +-
 .../golang.org/x/net/websocket/websocket.go | 2 +
 vendor/src/google.golang.org/grpc/.travis.yml | 5 +-
 .../google.golang.org/grpc/CONTRIBUTING.md | 31 +-
 vendor/src/google.golang.org/grpc/Makefile | 29 +-
 vendor/src/google.golang.org/grpc/README.md | 4 +-
 vendor/src/google.golang.org/grpc/backoff.go | 80 +
 vendor/src/google.golang.org/grpc/call.go | 39 +-
 .../src/google.golang.org/grpc/clientconn.go | 69 +-
 vendor/src/google.golang.org/grpc/coverage.sh | 13 +-
 .../grpc/credentials/credentials.go | 13 -
 vendor/src/google.golang.org/grpc/doc.go | 2 +-
 .../google.golang.org/grpc/grpclog/logger.go | 5 +-
 .../src/google.golang.org/grpc/interceptor.go | 74 +
 .../grpc/internal/internal.go | 49 +
 .../src/google.golang.org/grpc/peer/peer.go | 65 +
 vendor/src/google.golang.org/grpc/picker.go | 2 +-
 vendor/src/google.golang.org/grpc/rpc_util.go | 203 +-
 vendor/src/google.golang.org/grpc/server.go | 455 +++--
 vendor/src/google.golang.org/grpc/stream.go | 76 +-
 .../grpc/transport/control.go | 88 +-
 .../grpc/transport/handler_server.go | 383 ++++
 .../grpc/transport/http2_client.go | 266 +--
 .../grpc/transport/http2_server.go | 175 +-
 .../grpc/transport/http_util.go | 170 +-
 .../grpc/transport/transport.go | 107 +-
 67 files changed, 5898
insertions(+), 2010 deletions(-) delete mode 100644 vendor/src/golang.org/x/net/http2/buffer.go create mode 100644 vendor/src/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/src/golang.org/x/net/http2/configure_transport.go create mode 100644 vendor/src/golang.org/x/net/http2/fixed_buffer.go create mode 100644 vendor/src/golang.org/x/net/http2/go15.go create mode 100644 vendor/src/golang.org/x/net/http2/not_go15.go create mode 100644 vendor/src/golang.org/x/net/http2/not_go16.go create mode 100644 vendor/src/google.golang.org/grpc/backoff.go create mode 100644 vendor/src/google.golang.org/grpc/interceptor.go create mode 100644 vendor/src/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/src/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/src/google.golang.org/grpc/transport/handler_server.go diff --git a/Dockerfile b/Dockerfile index 9972de9226..e340dc2133 100644 --- a/Dockerfile +++ b/Dockerfile @@ -244,7 +244,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install containerd -ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264 +ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64 index 037508bdcb..20402436b5 100644 --- a/Dockerfile.aarch64 +++ b/Dockerfile.aarch64 @@ -191,7 +191,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install containerd -ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264 +ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ diff --git a/Dockerfile.armhf b/Dockerfile.armhf index ac3b7f51b7..3147de7191 100644 --- a/Dockerfile.armhf +++ b/Dockerfile.armhf @@ -200,7 +200,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install containerd -ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264 +ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ diff --git a/Dockerfile.gccgo b/Dockerfile.gccgo index 458fd420d4..221418e3c4 100644 --- a/Dockerfile.gccgo +++ b/Dockerfile.gccgo @@ -85,7 +85,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install containerd -ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264 +ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le index 6240bb9509..b09be972ec 100644 --- a/Dockerfile.ppc64le +++ b/Dockerfile.ppc64le @@ -215,7 +215,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install containerd -ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264 +ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ diff --git a/Dockerfile.s390x b/Dockerfile.s390x index 7852a96f9a..5e7c6e7df1 100644 --- a/Dockerfile.s390x +++ b/Dockerfile.s390x @@ -208,7 +208,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install containerd -ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264 +ENV CONTAINERD_COMMIT 
cf554d59dd96e459544748290eb9167f4bcde509 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ diff --git a/Dockerfile.simple b/Dockerfile.simple index 61f6a1b5f2..106b49cfc1 100644 --- a/Dockerfile.simple +++ b/Dockerfile.simple @@ -68,7 +68,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install containerd -ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264 +ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ diff --git a/hack/vendor.sh b/hack/vendor.sh index 570d187110..7e9a47db4a 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -56,7 +56,7 @@ clone git github.com/mattn/go-sqlite3 v1.1.0 clone git github.com/tchap/go-patricia v2.1.0 clone git github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 # forked golang.org/x/net package includes a patch for lazy loading trace templates -clone git golang.org/x/net 78cb2c067747f08b343f20614155233ab4ea2ad3 https://github.com/tonistiigi/net.git +clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 clone git github.com/docker/go-connections v0.2.0 @@ -97,7 +97,7 @@ clone git github.com/pborman/uuid v1.0 # get desired notary commit, might also need to be updated in Dockerfile clone git github.com/docker/notary v0.3.0 -clone git google.golang.org/grpc a22b6611561e9f0a3e0919690dd2caf48f14c517 https://github.com/grpc/grpc-go.git +clone git google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f clone git github.com/docker/go v1.5.1-1-1-gbaf439e clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c @@ -109,7 +109,7 @@ clone git github.com/seccomp/libseccomp-golang 60c9953736798c4a04e90d0f3da2f933d clone git github.com/coreos/go-systemd v4 clone git github.com/godbus/dbus v4.0.0 clone git github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -clone git github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +clone git github.com/golang/protobuf 3c84672111d91bb5ac31719e112f9f7126a0e26e # gelf logging driver deps clone git github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 @@ -136,7 +136,7 @@ clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https clone git github.com/docker/docker-credential-helpers v0.3.0 # containerd -clone git github.com/docker/containerd 57b7c3da915ebe943bd304c00890959b191e5264 +clone git github.com/docker/containerd cf554d59dd96e459544748290eb9167f4bcde509 # cli clone git github.com/spf13/cobra 75205f23b3ea70dc7ae5e900d074e010c23c37e9 https://github.com/dnephin/cobra.git diff --git a/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go b/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go index 3ce61dbba6..2013db1f50 100644 --- a/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go +++ b/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go @@ -115,14 +115,17 @@ func (*UpdateProcessResponse) ProtoMessage() {} func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{3} } type CreateContainerRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` - Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` - Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` - NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` + Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` + NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` + Runtime string `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"` + RuntimeArgs []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"` + CheckpointDir string `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"` } func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } @@ -233,8 +236,9 @@ func (*AddProcessResponse) ProtoMessage() {} func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } type CreateCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` } func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } @@ -258,8 +262,9 @@ func (*CreateCheckpointResponse) ProtoMessage() {} func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } type DeleteCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` } func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } @@ -276,7 +281,8 @@ func (*DeleteCheckpointResponse) ProtoMessage() {} func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } type ListCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"` } func (m *ListCheckpointRequest) Reset() { *m = 
ListCheckpointRequest{} } @@ -446,16 +452,17 @@ func (m *UpdateContainerRequest) GetResources() *UpdateResource { } type UpdateResource struct { - BlkioWeight uint32 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` - CpuShares uint32 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` - CpuPeriod uint32 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` - CpuQuota uint32 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` - CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` - CpusetMems string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` - MemoryLimit uint32 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` - MemorySwap uint32 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` - MemoryReservation uint32 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` - KernelMemoryLimit uint32 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` + BlkioWeight uint32 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` + CpuShares uint32 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` + CpuPeriod uint32 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` + CpuQuota uint32 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` + CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` + CpusetMems string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` + MemoryLimit uint32 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` + MemorySwap uint32 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` + MemoryReservation uint32 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` + KernelMemoryLimit uint32 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` + KernelTCPMemoryLimit uint32 `protobuf:"varint,11,opt,name=kernelTCPMemoryLimit" json:"kernelTCPMemoryLimit,omitempty"` } func (m *UpdateResource) Reset() { *m = UpdateResource{} } @@ -495,14 +502,14 @@ func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } type NetworkStats struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes" json:"rx_bytes,omitempty"` - Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets" json:"rx_Packets,omitempty"` - RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors" json:"Rx_errors,omitempty"` - RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped" json:"Rx_dropped,omitempty"` - TxBytes uint64 `protobuf:"varint,6,opt,name=Tx_bytes" json:"Tx_bytes,omitempty"` - TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets" json:"Tx_packets,omitempty"` - TxErrors uint64 `protobuf:"varint,8,opt,name=Tx_errors" json:"Tx_errors,omitempty"` - TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped" json:"Tx_dropped,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes" json:"rx_bytes,omitempty"` + Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets,json=rxPackets" json:"rx_Packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors,json=rxErrors" json:"Rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped,json=rxDropped" json:"Rx_dropped,omitempty"` + TxBytes uint64 
`protobuf:"varint,6,opt,name=Tx_bytes,json=txBytes" json:"Tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets,json=txPackets" json:"Tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=Tx_errors,json=txErrors" json:"Tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped,json=txDropped" json:"Tx_dropped,omitempty"` } func (m *NetworkStats) Reset() { *m = NetworkStats{} } @@ -511,10 +518,10 @@ func (*NetworkStats) ProtoMessage() {} func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } type CpuUsage struct { - TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage" json:"total_usage,omitempty"` - PercpuUsage []uint64 `protobuf:"varint,2,rep,name=percpu_usage" json:"percpu_usage,omitempty"` - UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode" json:"usage_in_kernelmode,omitempty"` - UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode" json:"usage_in_usermode,omitempty"` + TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"` + PercpuUsage []uint64 `protobuf:"varint,2,rep,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"` + UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"` + UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"` } func (m *CpuUsage) Reset() { *m = CpuUsage{} } @@ -524,8 +531,8 @@ func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31 type ThrottlingData struct { Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"` - ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods" json:"throttled_periods,omitempty"` - ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time" json:"throttled_time,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` } func (m *ThrottlingData) Reset() { *m = ThrottlingData{} } @@ -534,9 +541,9 @@ func (*ThrottlingData) ProtoMessage() {} func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } type CpuStats struct { - CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage" json:"cpu_usage,omitempty"` - ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data" json:"throttling_data,omitempty"` - SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage" json:"system_usage,omitempty"` + CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"` + ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data,json=throttlingData" json:"throttling_data,omitempty"` + SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage,json=systemUsage" json:"system_usage,omitempty"` } func (m *CpuStats) Reset() { *m = CpuStats{} } @@ -570,7 +577,7 @@ func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3 type MemoryData struct { Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` - MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` 
Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` } @@ -583,8 +590,8 @@ func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{ type MemoryStats struct { Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"` Usage *MemoryData `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"` - SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage" json:"swap_usage,omitempty"` - KernelUsage *MemoryData `protobuf:"bytes,4,opt,name=kernel_usage" json:"kernel_usage,omitempty"` + SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage,json=swapUsage" json:"swap_usage,omitempty"` + KernelUsage *MemoryData `protobuf:"bytes,4,opt,name=kernel_usage,json=kernelUsage" json:"kernel_usage,omitempty"` Stats map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` } @@ -634,14 +641,14 @@ func (*BlkioStatsEntry) ProtoMessage() {} func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } type BlkioStats struct { - IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive" json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive" json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive" json:"io_queued_recursive,omitempty"` - IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive" json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive" json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive" json:"io_merged_recursive,omitempty"` - IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive" json:"io_time_recursive,omitempty"` - SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive" json:"sectors_recursive,omitempty"` + IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"` } func (m *BlkioStats) Reset() { *m = BlkioStats{} } @@ -707,7 +714,7 @@ 
func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { type HugetlbStats struct { Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` - MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` } @@ -718,11 +725,11 @@ func (*HugetlbStats) ProtoMessage() {} func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } type CgroupStats struct { - CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats" json:"cpu_stats,omitempty"` - MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats" json:"memory_stats,omitempty"` - BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats" json:"blkio_stats,omitempty"` - HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats" json:"pids_stats,omitempty"` + CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats,json=cpuStats" json:"cpu_stats,omitempty"` + MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats,json=memoryStats" json:"memory_stats,omitempty"` + BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats,json=blkioStats" json:"blkio_stats,omitempty"` + HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats,json=hugetlbStats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats,json=pidsStats" json:"pids_stats,omitempty"` } func (m *CgroupStats) Reset() { *m = CgroupStats{} } @@ -766,8 +773,8 @@ func (m *CgroupStats) GetPidsStats() *PidsStats { } type StatsResponse struct { - NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats" json:"network_stats,omitempty"` - CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats" json:"cgroup_stats,omitempty"` + NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats,json=networkStats" json:"network_stats,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats,json=cgroupStats" json:"cgroup_stats,omitempty"` Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` } @@ -849,6 +856,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion2 + // Client API for API service type APIClient interface { @@ -1026,124 +1037,184 @@ func RegisterAPIServer(s *grpc.Server, srv APIServer) { s.RegisterService(&_API_serviceDesc, srv) } -func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetServerVersionRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).GetServerVersion(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).GetServerVersion(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/GetServerVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateContainerRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).CreateContainer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).CreateContainer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateContainerRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).UpdateContainer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).UpdateContainer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SignalRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).Signal(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).Signal(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Signal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Signal(ctx, 
req.(*SignalRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateProcessRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).UpdateProcess(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).UpdateProcess(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AddProcessRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).AddProcess(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).AddProcess(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/AddProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateCheckpointRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).CreateCheckpoint(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).CreateCheckpoint(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteCheckpointRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).DeleteCheckpoint(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).DeleteCheckpoint(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/DeleteCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_ListCheckpoint_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListCheckpointRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).ListCheckpoint(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).ListCheckpoint(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/ListCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) } -func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StateRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).State(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).State(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/State", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).State(ctx, req.(*StateRequest)) + } + return interceptor(ctx, in, info, handler) } func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error { @@ -1167,16 +1238,22 @@ func (x *aPIEventsServer) Send(m *Event) error { return x.ServerStream.SendMsg(m) } -func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StatsRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(APIServer).Stats(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(APIServer).Stats(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) + } + return interceptor(ctx, in, info, handler) } var _API_serviceDesc = grpc.ServiceDesc{ @@ -1238,148 +1315,152 @@ var _API_serviceDesc = grpc.ServiceDesc{ } var fileDescriptor0 = []byte{ - // 2285 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x19, 0xcb, 0x72, 0x1c, 0x49, - 0xd1, 0xf3, 0x94, 0x26, 0xe7, 0x21, 0xa9, 0xfd, 0xd0, 0x78, 0x76, 0xed, 0x35, 0x1d, 0xc0, 0x1a, - 0x58, 0x84, 0x91, 0x77, 0x03, 0x07, 0x04, 0x44, 0xac, 0x65, 0xb3, 0x98, 0xb5, 0x16, 0xb9, 0x25, - 0xb1, 0x17, 0x22, 0x26, 0x5a, 0x33, 0xe5, 0x99, 0x46, 0x33, 0xdd, 0xbd, 0xdd, 0x35, 0xd2, 0xe8, - 0xc2, 0x11, 0x6e, 0xfc, 0x00, 0x11, 0x5c, 0xb8, 0x71, 0xe7, 0xc0, 0x17, 0xf0, 0x27, 0xc4, 0x5e, - 0xb8, 0x73, 0x24, 0xab, 0x32, 0xbb, 0xba, 0x7a, 0x1e, 0xd2, 0x72, 0x20, 0xb8, 0xec, 0x65, 0xa2, - 0x32, 0x2b, 0x2b, 0x33, 0x2b, 0xdf, 0x5d, 0x03, 0x0d, 0x3f, 0x0e, 0xf6, 0xe2, 0x24, 0x92, 0x91, - 0x53, 0x93, 0x57, 0xb1, 0x48, 0xdd, 0xfb, 0xb0, 0xfb, 0x89, 0x90, 0xc7, 0x22, 0xb9, 0x10, 0xc9, - 0xaf, 0x45, 0x92, 0x06, 
0x51, 0xe8, 0x89, 0x2f, 0x66, 0x22, 0x95, 0xee, 0x1c, 0xba, 0xcb, 0x5b, - 0x69, 0x1c, 0x85, 0xa9, 0x70, 0xee, 0x40, 0x6d, 0xea, 0xff, 0x36, 0x4a, 0xba, 0xa5, 0x47, 0xa5, - 0xc7, 0x6d, 0x8f, 0x00, 0x8d, 0x0d, 0x42, 0xc4, 0x96, 0x19, 0xab, 0x00, 0x85, 0x8d, 0x7d, 0x39, - 0x18, 0x77, 0x2b, 0x84, 0xd5, 0x80, 0xd3, 0x83, 0xcd, 0x44, 0x5c, 0x04, 0x8a, 0x6b, 0xb7, 0x8a, - 0x1b, 0x0d, 0xcf, 0xc0, 0xee, 0xef, 0x4b, 0x70, 0xe7, 0x34, 0x1e, 0xfa, 0x52, 0x1c, 0x25, 0xd1, - 0x40, 0xa4, 0x29, 0xab, 0xe4, 0x74, 0xa0, 0x1c, 0x0c, 0xb5, 0xcc, 0x86, 0x87, 0x2b, 0x67, 0x1b, - 0x2a, 0x31, 0x22, 0xca, 0x1a, 0xa1, 0x96, 0xce, 0x43, 0x80, 0xc1, 0x24, 0x4a, 0xc5, 0xb1, 0x1c, - 0x06, 0xa1, 0x96, 0xb8, 0xe9, 0x59, 0x18, 0xa5, 0xcc, 0x65, 0x30, 0x94, 0x63, 0x2d, 0x13, 0x95, - 0xd1, 0x80, 0x73, 0x0f, 0xea, 0x63, 0x11, 0x8c, 0xc6, 0xb2, 0x5b, 0xd3, 0x68, 0x86, 0xdc, 0x5d, - 0xb8, 0xbb, 0xa0, 0x07, 0xdd, 0xdf, 0xfd, 0xb2, 0x04, 0xf7, 0x0e, 0x12, 0x81, 0x3b, 0x07, 0x51, - 0x28, 0xfd, 0x20, 0x14, 0xc9, 0x3a, 0x1d, 0x51, 0xa3, 0xb3, 0x59, 0x38, 0x9c, 0x88, 0x23, 0x1f, - 0xc5, 0x92, 0xaa, 0x16, 0x46, 0x6b, 0x3c, 0x16, 0x83, 0xf3, 0x38, 0x0a, 0x42, 0xa9, 0x35, 0xc6, - 0xfd, 0x1c, 0xa3, 0x34, 0x4e, 0xf5, 0x65, 0xc8, 0x4a, 0x04, 0x28, 0x8d, 0x71, 0x11, 0xcd, 0x48, - 0xe3, 0x86, 0xc7, 0x10, 0xe3, 0x45, 0x92, 0x74, 0xeb, 0x06, 0x8f, 0x90, 0xc2, 0x4f, 0xfc, 0x33, - 0x31, 0x49, 0xbb, 0x1b, 0x8f, 0x2a, 0x0a, 0x4f, 0x90, 0xf3, 0x08, 0x9a, 0x61, 0x74, 0x14, 0x5c, - 0x44, 0xd2, 0x8b, 0x22, 0xd9, 0xdd, 0xd4, 0x06, 0xb3, 0x51, 0xee, 0x2b, 0xd8, 0x5d, 0xba, 0x29, - 0x47, 0xc1, 0x1e, 0x34, 0x06, 0x19, 0x52, 0xdf, 0xb8, 0xb9, 0xbf, 0xbd, 0xa7, 0xe3, 0x6a, 0x2f, - 0x27, 0xce, 0x49, 0x90, 0x55, 0xfb, 0x38, 0x18, 0x85, 0xfe, 0xe4, 0xab, 0xfb, 0x53, 0xdd, 0x47, - 0x1f, 0xe1, 0xe8, 0x61, 0xc8, 0xdd, 0x86, 0x4e, 0xc6, 0x8a, 0x5d, 0xf2, 0xb7, 0x0a, 0xec, 0x7c, - 0x3c, 0x1c, 0xde, 0x10, 0x31, 0x18, 0x76, 0x52, 0x24, 0x18, 0x98, 0xc8, 0xb1, 0xac, 0x2f, 0x6b, - 0x60, 0xe7, 0x3d, 0xa8, 0xce, 0x52, 0xbc, 0x49, 0x45, 0xdf, 0xa4, 0xc9, 0x37, 0x39, 0x45, 0x94, - 0xa7, 0x37, 0x1c, 0x07, 0xaa, 0x7e, 0x32, 0x4a, 0xd1, 0x13, 0xca, 0x84, 0x7a, 0xad, 0x54, 0x16, - 0xe1, 0x05, 0x7a, 0x41, 0xa1, 0xd4, 0x52, 0x61, 0x06, 0x97, 0x43, 0xb6, 0xbf, 0x5a, 0x66, 0xd7, - 0xda, 0xc8, 0xaf, 0x65, 0x9c, 0xba, 0xb9, 0xda, 0xa9, 0x8d, 0x35, 0x4e, 0x85, 0x82, 0x53, 0x5d, - 0x68, 0x0d, 0xfc, 0xd8, 0x3f, 0x0b, 0x26, 0x81, 0x0c, 0x44, 0xda, 0x6d, 0x6a, 0x25, 0x0a, 0x38, - 0xe7, 0x31, 0x6c, 0xf9, 0x71, 0xec, 0x27, 0xd3, 0x28, 0x41, 0xd3, 0xbc, 0x0d, 0x26, 0xa2, 0xdb, - 0xd2, 0x4c, 0x16, 0xd1, 0x8a, 0x5b, 0x2a, 0x26, 0x41, 0x38, 0x9b, 0xbf, 0x56, 0xb1, 0xd1, 0x6d, - 0x6b, 0xb2, 0x02, 0x4e, 0x71, 0x0b, 0xa3, 0xcf, 0xc4, 0xe5, 0x51, 0x12, 0x5c, 0xe0, 0x99, 0x11, - 0x0a, 0xed, 0x68, 0x2b, 0x2e, 0xa2, 0x9d, 0xf7, 0x61, 0x23, 0x99, 0x04, 0xd3, 0x40, 0xa6, 0xdd, - 0x2d, 0x54, 0xab, 0xb9, 0xdf, 0x66, 0x7b, 0x7a, 0x1a, 0xeb, 0x65, 0xbb, 0xee, 0x0b, 0xa8, 0x13, - 0x4a, 0x99, 0x57, 0x91, 0xb0, 0xb7, 0xf4, 0x5a, 0xe1, 0xd2, 0xe8, 0xad, 0xd4, 0xbe, 0xaa, 0x7a, - 0x7a, 0xad, 0x70, 0x63, 0x3f, 0x19, 0x6a, 0x3f, 0x21, 0x4e, 0xad, 0x5d, 0x0f, 0xaa, 0xca, 0x51, - 0xca, 0xd4, 0x33, 0x76, 0x78, 0xdb, 0x53, 0x4b, 0x85, 0x19, 0x71, 0x4c, 0x21, 0x06, 0x97, 0xce, - 0xb7, 0xa1, 0xe3, 0x0f, 0x87, 0x68, 0x9e, 0x08, 0xbd, 0xfe, 0x49, 0x30, 0x4c, 0x91, 0x53, 0x05, - 0x37, 0x17, 0xb0, 0xee, 0x1d, 0x70, 0xec, 0x80, 0xe2, 0x38, 0xfb, 0x8d, 0xc9, 0x07, 0x93, 0xa3, - 0xeb, 0x82, 0xed, 0x87, 0x85, 0xd4, 0x2e, 0xeb, 0xb0, 0xda, 0xc9, 0x12, 0x24, 0x3f, 0x6d, 0x11, - 0xb9, 0x3d, 0xe8, 0x2e, 0x73, 0x67, 0xc9, 0x3f, 
0x85, 0xdd, 0x17, 0x62, 0x22, 0xbe, 0x8a, 0x64, - 0x34, 0x51, 0xe8, 0x4f, 0x05, 0x67, 0x92, 0x5e, 0x2b, 0xd6, 0xcb, 0xc7, 0x99, 0xf5, 0xfb, 0x70, - 0xf7, 0x75, 0x90, 0xca, 0x1b, 0x19, 0xbb, 0xbf, 0x03, 0xc8, 0x89, 0x8c, 0x98, 0x52, 0x2e, 0x46, - 0xe1, 0xc4, 0x3c, 0x90, 0x9c, 0x5d, 0x7a, 0xad, 0x7c, 0x20, 0x07, 0x31, 0x97, 0x63, 0xb5, 0x54, - 0x75, 0x67, 0x16, 0x06, 0xf3, 0xe3, 0x68, 0x70, 0x2e, 0x64, 0xaa, 0x6b, 0x1b, 0xd6, 0x1d, 0x0b, - 0xa5, 0x53, 0x64, 0x2c, 0x26, 0x13, 0x5d, 0xe0, 0x36, 0x3d, 0x02, 0xdc, 0x43, 0xb8, 0xb7, 0xa8, - 0x28, 0x17, 0xa3, 0xa7, 0xd0, 0xcc, 0xed, 0x98, 0xa2, 0x4a, 0x95, 0xd5, 0xd6, 0xb6, 0xa9, 0xdc, - 0x87, 0xd0, 0x3a, 0x96, 0x68, 0xed, 0x75, 0xd7, 0x7d, 0x0c, 0x1d, 0x53, 0xc9, 0x34, 0x21, 0xe5, - 0xa2, 0x2f, 0x67, 0x29, 0x53, 0x31, 0xe4, 0xfe, 0xbd, 0x02, 0x1b, 0x1c, 0x2a, 0x59, 0xbe, 0x97, - 0xf2, 0x7c, 0xff, 0xbf, 0x94, 0x9d, 0x77, 0xa1, 0x91, 0x5e, 0xa5, 0x52, 0x4c, 0x8f, 0xb8, 0xf8, - 0xb4, 0xbd, 0x1c, 0xf1, 0x75, 0x09, 0xca, 0x4b, 0xd0, 0x3f, 0x4a, 0xd0, 0x30, 0x6e, 0xfe, 0xaf, - 0x1b, 0xf8, 0x07, 0xd0, 0x88, 0xc9, 0xf1, 0x82, 0x2a, 0x49, 0x73, 0xbf, 0xc3, 0x82, 0xb2, 0xda, - 0x91, 0x13, 0x58, 0xf1, 0x53, 0xb5, 0xe3, 0xc7, 0x6a, 0xd0, 0xb5, 0x42, 0x83, 0x46, 0xe7, 0xc7, - 0xaa, 0x44, 0xd5, 0x75, 0x89, 0xd2, 0x6b, 0xa7, 0x8b, 0x17, 0x9b, 0x85, 0x32, 0xc0, 0xcc, 0xa3, - 0x9e, 0x92, 0x81, 0xee, 0x47, 0xb0, 0x71, 0xe8, 0x0f, 0xc6, 0x78, 0x0f, 0x75, 0x70, 0x10, 0x73, - 0x98, 0xe2, 0x41, 0xb5, 0x56, 0x42, 0xa6, 0x02, 0xed, 0x7d, 0xc5, 0xf5, 0x94, 0x21, 0xf7, 0x1c, - 0x1b, 0x33, 0xa5, 0x01, 0x27, 0xd3, 0x13, 0xac, 0x5c, 0x99, 0x41, 0xb2, 0x5c, 0x5a, 0x6e, 0xed, - 0x16, 0x0d, 0xba, 0x65, 0x63, 0x4a, 0x92, 0xb9, 0xd0, 0x65, 0x36, 0x60, 0x7d, 0xbc, 0x6c, 0xdb, - 0xfd, 0x03, 0xce, 0x4e, 0x34, 0x55, 0xdd, 0x38, 0x3b, 0xad, 0x9e, 0x07, 0xc8, 0x7c, 0x95, 0x82, - 0xf9, 0x9e, 0x42, 0x23, 0x11, 0x69, 0x34, 0x4b, 0xd0, 0xcc, 0xda, 0xb2, 0xcd, 0xfd, 0xbb, 0x59, - 0x26, 0x69, 0x59, 0x1e, 0xef, 0x7a, 0x39, 0x9d, 0xfb, 0x65, 0x19, 0x3a, 0xc5, 0x5d, 0x55, 0x97, - 0xce, 0x26, 0xe7, 0x41, 0xf4, 0x39, 0x8d, 0x83, 0x64, 0x3c, 0x1b, 0xa5, 0xb2, 0x0a, 0x6d, 0x79, - 0x8c, 0x5d, 0x07, 0x25, 0x51, 0x57, 0xc9, 0x11, 0xbc, 0x7b, 0x24, 0x92, 0x20, 0x1a, 0xf2, 0xc8, - 0x92, 0x23, 0x54, 0x19, 0x40, 0xe0, 0xcd, 0x2c, 0x92, 0x3e, 0x0f, 0xa0, 0x06, 0xd6, 0x73, 0x20, - 0xfa, 0x48, 0xc8, 0x03, 0xe5, 0xb5, 0x1a, 0xcf, 0x81, 0x06, 0x93, 0xef, 0x1f, 0x8a, 0x69, 0xca, - 0x69, 0x6e, 0x61, 0x94, 0xe6, 0xe4, 0xcd, 0xd7, 0x2a, 0xa8, 0x39, 0xdf, 0x6d, 0x94, 0xe2, 0x40, - 0xe0, 0xf1, 0xa5, 0x1f, 0xeb, 0xb4, 0x6f, 0x7b, 0x16, 0x06, 0x03, 0x79, 0x87, 0x20, 0xb4, 0x06, - 0x4e, 0xfd, 0xbe, 0x6a, 0x85, 0xba, 0x0c, 0xb4, 0xbd, 0xe5, 0x0d, 0x45, 0x7d, 0x2e, 0x92, 0x50, - 0x4c, 0x0e, 0x2d, 0xa9, 0x40, 0xd4, 0x4b, 0x1b, 0xea, 0x3b, 0x63, 0xc9, 0xe7, 0xdc, 0x7b, 0xbe, - 0x0f, 0xed, 0x97, 0x17, 0x02, 0xab, 0x71, 0x16, 0x05, 0x68, 0x43, 0x15, 0xcc, 0xe8, 0xd9, 0x69, - 0xac, 0x3d, 0x50, 0xf5, 0x72, 0x84, 0x9b, 0x42, 0x4d, 0x93, 0xaf, 0x1c, 0x17, 0x28, 0x80, 0xca, - 0x26, 0x80, 0x8a, 0xe1, 0xd2, 0x36, 0xe1, 0xc2, 0x81, 0x55, 0xcd, 0x03, 0xab, 0x20, 0xb4, 0xb6, - 0x28, 0xf4, 0x8f, 0x65, 0x68, 0x7d, 0x26, 0xe4, 0x65, 0x94, 0x9c, 0xab, 0x44, 0x49, 0x57, 0x76, - 0xbe, 0xfb, 0xf8, 0x49, 0x33, 0xef, 0x9f, 0x5d, 0x49, 0x0e, 0x8c, 0x2a, 0xe6, 0xe5, 0xfc, 0xb9, - 0x02, 0x9d, 0x07, 0x00, 0xb8, 0x75, 0xe4, 0x53, 0xb7, 0xa3, 0xc1, 0xa5, 0x91, 0xcc, 0x19, 0xe1, - 0xbc, 0x03, 0x0d, 0x6f, 0xde, 0xc7, 0x7a, 0x1a, 0x25, 0x14, 0xbd, 0x55, 0xfc, 0x1a, 0x9a, 0xbf, - 0xd4, 0xb0, 0x3a, 0x8b, 0x9b, 0xc3, 0x24, 0x8a, 0x63, 0x31, 0xcc, 0x54, 
0x4b, 0xe6, 0x2f, 0x08, - 0xa1, 0xa4, 0x9e, 0x64, 0x52, 0xeb, 0x24, 0x55, 0xe6, 0x52, 0x71, 0x2b, 0x66, 0xa9, 0x1b, 0x7c, - 0x29, 0x5b, 0xea, 0x89, 0x91, 0xba, 0x49, 0x52, 0xa5, 0x25, 0xf5, 0x24, 0x97, 0xda, 0xc8, 0xce, - 0xb2, 0x54, 0xf7, 0xaf, 0x25, 0xd8, 0xc4, 0xb0, 0x3c, 0x4d, 0xfd, 0x91, 0xc0, 0x0e, 0xd6, 0x94, - 0x18, 0xc2, 0x93, 0xfe, 0x4c, 0x81, 0xec, 0x32, 0xd0, 0x28, 0x22, 0xf8, 0x06, 0xb4, 0x62, 0x91, - 0x60, 0xb0, 0x32, 0x45, 0x19, 0x0b, 0x4a, 0xd5, 0x6b, 0x12, 0x8e, 0x48, 0xf6, 0xe0, 0xb6, 0xde, - 0xeb, 0x07, 0x61, 0x9f, 0xc2, 0x67, 0x1a, 0x0d, 0x05, 0x9b, 0x6a, 0x47, 0x6f, 0xbd, 0x0a, 0x3f, - 0x35, 0x1b, 0xce, 0x77, 0x61, 0xc7, 0xd0, 0xab, 0x2e, 0xa9, 0xa9, 0xc9, 0x74, 0x5b, 0x4c, 0x7d, - 0xca, 0x68, 0x1c, 0x5a, 0x3a, 0x27, 0x63, 0xfc, 0xea, 0x95, 0xd8, 0x46, 0x46, 0x2f, 0x7c, 0x4c, - 0x36, 0xac, 0xa0, 0xb1, 0x4e, 0xc9, 0x94, 0xb5, 0xcd, 0x40, 0xe7, 0x7b, 0xb0, 0x23, 0x89, 0x56, - 0x0c, 0xfb, 0x19, 0x0d, 0x79, 0x73, 0xdb, 0x6c, 0x1c, 0x31, 0xf1, 0xb7, 0xa0, 0x93, 0x13, 0xeb, - 0x7a, 0x4c, 0xfa, 0xb6, 0x0d, 0xf6, 0x44, 0x55, 0xe5, 0x3f, 0x91, 0xb1, 0x28, 0x72, 0x3e, 0xd0, - 0x15, 0xc2, 0x32, 0x55, 0x73, 0x7f, 0x2b, 0xab, 0xac, 0x6c, 0x0c, 0x5d, 0x15, 0xc8, 0x2c, 0x3f, - 0x83, 0x2d, 0x69, 0x54, 0xef, 0x63, 0x02, 0xf9, 0x5c, 0x5e, 0xb3, 0xea, 0x56, 0xbc, 0x98, 0xd7, - 0x91, 0xc5, 0x8b, 0xa2, 0xe5, 0xa9, 0xe5, 0xb3, 0x40, 0xd2, 0xaf, 0x49, 0x38, 0x2d, 0xc2, 0xfd, - 0x09, 0x34, 0x70, 0x1e, 0x48, 0x49, 0x3b, 0x34, 0xcc, 0x60, 0x96, 0x24, 0x98, 0x5f, 0x99, 0x61, - 0x18, 0x54, 0xf3, 0x82, 0x6e, 0x97, 0x6c, 0x0c, 0x02, 0xdc, 0x08, 0x80, 0xd2, 0x5c, 0x4b, 0x43, - 0x1a, 0x3b, 0x04, 0x08, 0x50, 0x71, 0x36, 0xf5, 0xe7, 0xc6, 0xf5, 0x3a, 0xce, 0x10, 0x41, 0x17, - 0x44, 0x81, 0x6f, 0xfd, 0x60, 0x32, 0xe0, 0x6f, 0x5f, 0x14, 0xc8, 0x60, 0x2e, 0xb0, 0x6a, 0x0b, - 0xfc, 0x4b, 0x19, 0x9a, 0x24, 0x91, 0x14, 0x46, 0xaa, 0x01, 0x36, 0x16, 0x23, 0x52, 0x03, 0xd8, - 0xfa, 0x6b, 0xb9, 0xb8, 0x7c, 0x0c, 0xcc, 0x55, 0xcd, 0x74, 0xc3, 0x46, 0x97, 0x62, 0xed, 0xb3, - 0xac, 0xb3, 0x92, 0xba, 0xa1, 0x88, 0x48, 0xe1, 0x0f, 0xa1, 0x45, 0xf1, 0xc9, 0x67, 0xaa, 0xeb, - 0xce, 0x34, 0x89, 0x8c, 0x4e, 0x3d, 0x55, 0xd3, 0x16, 0xea, 0xab, 0xbb, 0x7b, 0x73, 0xff, 0x41, - 0x81, 0x5c, 0xdf, 0x64, 0x4f, 0xff, 0xbe, 0x0c, 0x25, 0x96, 0x59, 0xa2, 0xed, 0x3d, 0x03, 0xc8, - 0x91, 0xaa, 0x66, 0x9d, 0x8b, 0xab, 0x6c, 0xaa, 0xc4, 0xa5, 0xba, 0xfb, 0x85, 0x3f, 0x99, 0x65, - 0x46, 0x25, 0xe0, 0xc7, 0xe5, 0x67, 0x25, 0x77, 0x00, 0x5b, 0xcf, 0x55, 0xcf, 0xb2, 0x8e, 0x17, - 0x9e, 0x6c, 0xaa, 0x2b, 0x9f, 0x6c, 0xaa, 0xd9, 0x93, 0x0d, 0x96, 0xd1, 0x28, 0xe6, 0x0e, 0x8b, - 0xab, 0x5c, 0x50, 0xd5, 0x12, 0xe4, 0xfe, 0xb3, 0x0a, 0x90, 0x4b, 0x71, 0x8e, 0xa1, 0x17, 0x44, - 0x7d, 0xd5, 0x20, 0x82, 0x81, 0xa0, 0x82, 0xd4, 0x4f, 0x04, 0x86, 0x4f, 0x1a, 0x5c, 0x08, 0x9e, - 0x21, 0xee, 0xf1, 0xbd, 0x17, 0x94, 0xf3, 0x76, 0x11, 0xa2, 0x83, 0xba, 0x72, 0x79, 0xd9, 0x31, - 0xe7, 0x97, 0x70, 0x37, 0x67, 0x3a, 0xb4, 0xf8, 0x95, 0xaf, 0xe5, 0x77, 0xdb, 0xf0, 0x1b, 0xe6, - 0xbc, 0x7e, 0x0e, 0x88, 0xee, 0x63, 0x8f, 0x99, 0x15, 0x38, 0x55, 0xae, 0xe5, 0xb4, 0x13, 0x44, - 0x6f, 0xf4, 0x89, 0x9c, 0xcf, 0x1b, 0xb8, 0x6f, 0x5d, 0x54, 0xa5, 0xbd, 0xc5, 0xad, 0x7a, 0x2d, - 0xb7, 0x7b, 0x46, 0x2f, 0x55, 0x18, 0x72, 0x96, 0x9f, 0x02, 0xee, 0xf4, 0x2f, 0xfd, 0x40, 0x2e, - 0xf2, 0xab, 0xdd, 0x74, 0xcf, 0xcf, 0xf1, 0x50, 0x91, 0x19, 0xdd, 0x73, 0x2a, 0x92, 0x51, 0xe1, - 0x9e, 0xf5, 0x9b, 0xee, 0x79, 0xa8, 0x4f, 0xe4, 0x7c, 0x9e, 0x03, 0x22, 0x17, 0xf5, 0xd9, 0xb8, - 0x96, 0xcb, 0x56, 0x10, 0x15, 0x75, 0x39, 0x80, 0x9d, 0x54, 0x0c, 0x24, 0x76, 0x14, 0x8b, 0xc7, - 
0xe6, 0xb5, 0x3c, 0xb6, 0xf9, 0x80, 0x61, 0xe2, 0x7e, 0x01, 0xad, 0x5f, 0xcc, 0x46, 0x42, 0x4e, - 0xce, 0x4c, 0xce, 0xff, 0xaf, 0xcb, 0xcc, 0xbf, 0xb1, 0xcc, 0x1c, 0x8c, 0x92, 0x68, 0x16, 0x17, - 0xaa, 0x36, 0xe5, 0xf0, 0x52, 0xd5, 0xd6, 0x34, 0xba, 0x6a, 0x13, 0xf5, 0x47, 0xd0, 0xa2, 0x81, - 0x89, 0x0f, 0x50, 0x15, 0x72, 0x96, 0x93, 0x3e, 0x1b, 0xd0, 0xe8, 0xd8, 0x3e, 0x0f, 0x9f, 0x7c, - 0xaa, 0x58, 0x8d, 0x72, 0x33, 0xe1, 0xd7, 0x47, 0x9e, 0x75, 0xaf, 0xa0, 0x3d, 0x26, 0xdb, 0xf0, - 0x29, 0x0a, 0xc0, 0x6f, 0x66, 0xca, 0xe5, 0x77, 0xd8, 0xb3, 0x6d, 0x48, 0xa6, 0x6e, 0x8d, 0x6d, - 0xb3, 0xfe, 0x00, 0x40, 0x7d, 0x5e, 0xf4, 0xb3, 0x42, 0x65, 0xbf, 0xe7, 0x99, 0x0e, 0x81, 0xdf, - 0x32, 0xd9, 0xb2, 0x77, 0x02, 0x3b, 0x4b, 0x3c, 0x57, 0x94, 0xa9, 0xef, 0xd8, 0x65, 0xaa, 0xb9, - 0x7f, 0x9b, 0x59, 0xda, 0x47, 0xed, 0xda, 0xf5, 0xe7, 0x12, 0x7d, 0x8d, 0x98, 0x27, 0x17, 0xe7, - 0x19, 0xb4, 0x43, 0x1a, 0xbe, 0x8c, 0x03, 0x2a, 0x16, 0x23, 0x7b, 0x30, 0xf3, 0x5a, 0xa1, 0x3d, - 0xa6, 0xa1, 0x23, 0x06, 0xda, 0x02, 0x2b, 0x1d, 0x61, 0x19, 0xc7, 0x6b, 0x0e, 0x2c, 0x6f, 0x17, - 0x86, 0xc1, 0xca, 0xe2, 0x30, 0xc8, 0x8f, 0x06, 0xeb, 0xde, 0x18, 0xf7, 0xff, 0x55, 0x87, 0xca, - 0xc7, 0x47, 0xaf, 0x9c, 0x53, 0xd8, 0x5e, 0x7c, 0x40, 0x77, 0x1e, 0xb2, 0xe8, 0x35, 0x8f, 0xee, - 0xbd, 0xf7, 0xd6, 0xee, 0xf3, 0xb4, 0x7c, 0xcb, 0xf1, 0x60, 0x6b, 0xe1, 0x41, 0xd6, 0xc9, 0xda, - 0xc9, 0xea, 0x27, 0xe9, 0xde, 0xc3, 0x75, 0xdb, 0x36, 0xcf, 0x85, 0xf1, 0xdc, 0xf0, 0x5c, 0xfd, - 0xa9, 0x66, 0x78, 0xae, 0x9b, 0xea, 0x6f, 0x39, 0x3f, 0x82, 0x3a, 0x3d, 0xd1, 0x3a, 0x77, 0x98, - 0xb6, 0xf0, 0xf8, 0xdb, 0xbb, 0xbb, 0x80, 0x35, 0x07, 0x5f, 0x43, 0xbb, 0xf0, 0xea, 0xee, 0xbc, - 0x53, 0x90, 0x55, 0x7c, 0xe1, 0xed, 0xbd, 0xbb, 0x7a, 0xd3, 0x70, 0x3b, 0x00, 0xc8, 0x5f, 0xf1, - 0x9c, 0x2e, 0x53, 0x2f, 0xbd, 0x14, 0xf7, 0xee, 0xaf, 0xd8, 0x31, 0x4c, 0xd0, 0x95, 0x8b, 0xcf, - 0x72, 0xce, 0x82, 0x55, 0x17, 0x9f, 0xce, 0x8c, 0x2b, 0xd7, 0xbe, 0xe7, 0x69, 0xb6, 0x8b, 0x4f, - 0x72, 0x86, 0xed, 0x9a, 0xa7, 0x3e, 0xc3, 0x76, 0xed, 0x5b, 0xde, 0x2d, 0xe7, 0x57, 0xd0, 0x29, - 0x3e, 0x92, 0x39, 0x99, 0x91, 0x56, 0x3e, 0xf2, 0xf5, 0x1e, 0xac, 0xd9, 0x35, 0x0c, 0x3f, 0x84, - 0x1a, 0xbd, 0x7e, 0x65, 0x29, 0x67, 0x3f, 0x9a, 0xf5, 0xee, 0x14, 0x91, 0xe6, 0xd4, 0x13, 0xa8, - 0xd3, 0x87, 0x9d, 0x09, 0x80, 0xc2, 0x77, 0x5e, 0xaf, 0x65, 0x63, 0xdd, 0x5b, 0x4f, 0x4a, 0x99, - 0x9c, 0xb4, 0x20, 0x27, 0x5d, 0x25, 0xc7, 0x72, 0xce, 0x59, 0x5d, 0xff, 0xa3, 0xf5, 0xf4, 0x3f, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xa3, 0xf6, 0xb8, 0xde, 0x1a, 0x00, 0x00, + // 2348 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x73, 0x1c, 0x49, + 0x11, 0xf6, 0x3c, 0xa5, 0xc9, 0x79, 0x48, 0xea, 0xd5, 0x63, 0x3c, 0xbb, 0xf6, 0x9a, 0x8e, 0x05, + 0x0c, 0x2c, 0xc2, 0x8c, 0x77, 0x03, 0x07, 0x44, 0x10, 0x61, 0x4b, 0x66, 0x31, 0x6b, 0x2d, 0xe3, + 0x96, 0xc4, 0x1e, 0x27, 0x5a, 0xdd, 0xe5, 0x99, 0x46, 0x33, 0xdd, 0xbd, 0xdd, 0x35, 0xd2, 0xe8, + 0xc2, 0x81, 0x03, 0xdc, 0xf8, 0x03, 0x10, 0x5c, 0xb8, 0x71, 0xe7, 0xc0, 0x2f, 0x20, 0x82, 0x1f, + 0xc2, 0x8d, 0x3b, 0x47, 0xb2, 0xaa, 0xb2, 0xab, 0xab, 0xe7, 0x21, 0x99, 0x03, 0xc1, 0x85, 0xcb, + 0x44, 0xd5, 0x57, 0x59, 0x99, 0x59, 0xf9, 0xaa, 0xec, 0x1a, 0x68, 0xb8, 0x71, 0x70, 0x18, 0x27, + 0x11, 0x8f, 0xac, 0x1a, 0xbf, 0x89, 0x59, 0x6a, 0xdf, 0x87, 0x83, 0xcf, 0x18, 0x3f, 0x65, 0xc9, + 0x15, 0x4b, 0x7e, 0xc1, 0x92, 0x34, 0x88, 0x42, 0x87, 0x7d, 0x35, 0x63, 0x29, 0xb7, 0xe7, 0xd0, + 0x5d, 0x5e, 0x4a, 0xe3, 0x28, 0x4c, 0x99, 0xb5, 0x0b, 0xb5, 0xa9, 0xfb, 0xcb, 0x28, 0xe9, 
0x96, + 0x1e, 0x95, 0x1e, 0xb7, 0x1d, 0x35, 0x91, 0x68, 0x10, 0x22, 0x5a, 0x26, 0x54, 0x4c, 0x04, 0x1a, + 0xbb, 0xdc, 0x1b, 0x77, 0x2b, 0x0a, 0x95, 0x13, 0xab, 0x07, 0x9b, 0x09, 0xbb, 0x0a, 0x04, 0xd7, + 0x6e, 0x15, 0x17, 0x1a, 0x8e, 0x9e, 0xdb, 0xbf, 0x29, 0xc1, 0xee, 0x79, 0xec, 0xbb, 0x9c, 0x0d, + 0x92, 0xc8, 0x63, 0x69, 0x4a, 0x2a, 0x59, 0x1d, 0x28, 0x07, 0xbe, 0x94, 0xd9, 0x70, 0x70, 0x64, + 0x6d, 0x43, 0x25, 0x46, 0xa0, 0x2c, 0x01, 0x31, 0xb4, 0x1e, 0x02, 0x78, 0x93, 0x28, 0x65, 0xa7, + 0xdc, 0x0f, 0x42, 0x29, 0x71, 0xd3, 0x31, 0x10, 0xa1, 0xcc, 0x75, 0xe0, 0xf3, 0xb1, 0x94, 0x89, + 0xca, 0xc8, 0x89, 0xb5, 0x0f, 0xf5, 0x31, 0x0b, 0x46, 0x63, 0xde, 0xad, 0x49, 0x98, 0x66, 0xf6, + 0x01, 0xec, 0x2d, 0xe8, 0xa1, 0xce, 0x6f, 0xff, 0xbd, 0x0c, 0xfb, 0x47, 0x09, 0xc3, 0x95, 0xa3, + 0x28, 0xe4, 0x6e, 0x10, 0xb2, 0x64, 0x9d, 0x8e, 0xa8, 0xd1, 0xc5, 0x2c, 0xf4, 0x27, 0x6c, 0xe0, + 0xa2, 0x58, 0xa5, 0xaa, 0x81, 0x48, 0x8d, 0xc7, 0xcc, 0xbb, 0x8c, 0xa3, 0x20, 0xe4, 0x52, 0x63, + 0x5c, 0xcf, 0x11, 0xa1, 0x71, 0x2a, 0x0f, 0xa3, 0xac, 0xa4, 0x26, 0x42, 0x63, 0x1c, 0x44, 0x33, + 0xa5, 0x71, 0xc3, 0xa1, 0x19, 0xe1, 0x2c, 0x49, 0xba, 0x75, 0x8d, 0xe3, 0x4c, 0xe0, 0x13, 0xf7, + 0x82, 0x4d, 0xd2, 0xee, 0xc6, 0xa3, 0x8a, 0xc0, 0xd5, 0xcc, 0x7a, 0x04, 0xcd, 0x30, 0x1a, 0x04, + 0x57, 0x11, 0x77, 0xa2, 0x88, 0x77, 0x37, 0xa5, 0xc1, 0x4c, 0xc8, 0xea, 0xc2, 0x46, 0x32, 0x0b, + 0x79, 0x30, 0x65, 0xdd, 0x86, 0x64, 0x99, 0x4d, 0xc5, 0x5e, 0x1a, 0x3e, 0x4f, 0x46, 0x69, 0x17, + 0x24, 0x63, 0x13, 0xb2, 0x3e, 0x82, 0x76, 0x7e, 0x92, 0xe3, 0x20, 0xe9, 0x36, 0x25, 0x87, 0x22, + 0x68, 0xbf, 0x82, 0x83, 0x25, 0x5b, 0x52, 0x9c, 0x1d, 0x42, 0xc3, 0xcb, 0x40, 0x69, 0xd3, 0x66, + 0x7f, 0xfb, 0x50, 0x46, 0xee, 0x61, 0x4e, 0x9c, 0x93, 0x20, 0xab, 0xf6, 0x69, 0x30, 0x0a, 0xdd, + 0xc9, 0xbb, 0x47, 0x8c, 0xb0, 0x98, 0xdc, 0x42, 0xf1, 0x49, 0x33, 0x7b, 0x1b, 0x3a, 0x19, 0x2b, + 0x72, 0xfa, 0x5f, 0x2a, 0xb0, 0xf3, 0xdc, 0xf7, 0xef, 0x88, 0x49, 0x0c, 0x6c, 0xce, 0x12, 0x0c, + 0x7d, 0xe4, 0x58, 0x96, 0xe6, 0xd4, 0x73, 0xeb, 0x43, 0xa8, 0xce, 0x52, 0x3c, 0x49, 0x45, 0x9e, + 0xa4, 0x49, 0x27, 0x39, 0x47, 0xc8, 0x91, 0x0b, 0x96, 0x05, 0x55, 0x57, 0xd8, 0xb2, 0x2a, 0x6d, + 0x29, 0xc7, 0x42, 0x65, 0x16, 0x5e, 0xa1, 0x9f, 0x05, 0x24, 0x86, 0x02, 0xf1, 0xae, 0x7d, 0xf2, + 0xb0, 0x18, 0x66, 0xc7, 0xda, 0xc8, 0x8f, 0xa5, 0xc3, 0x66, 0x73, 0x75, 0xd8, 0x34, 0xd6, 0x84, + 0x0d, 0x14, 0xc2, 0xc6, 0x86, 0x96, 0xe7, 0xc6, 0xee, 0x45, 0x30, 0x09, 0x78, 0xc0, 0x52, 0xf4, + 0x9f, 0x50, 0xa2, 0x80, 0x59, 0x8f, 0x61, 0xcb, 0x8d, 0x63, 0x37, 0x99, 0x46, 0x09, 0x9a, 0xe6, + 0x6d, 0x30, 0x61, 0xdd, 0x96, 0x64, 0xb2, 0x08, 0x0b, 0x6e, 0x29, 0x9b, 0x04, 0xe1, 0x6c, 0xfe, + 0x5a, 0x44, 0x5f, 0xb7, 0x2d, 0xc9, 0x0a, 0x98, 0xe0, 0x16, 0x46, 0x5f, 0xb0, 0xeb, 0x41, 0x12, + 0x5c, 0xe1, 0x9e, 0x11, 0x0a, 0xed, 0x48, 0x2b, 0x2e, 0xc2, 0xd6, 0x37, 0x31, 0x30, 0x27, 0xc1, + 0x34, 0xe0, 0x69, 0x77, 0x0b, 0xd5, 0x6a, 0xf6, 0xdb, 0x64, 0x4f, 0x47, 0xa2, 0x4e, 0xb6, 0x6a, + 0x1f, 0x43, 0x5d, 0x41, 0xc2, 0xbc, 0x82, 0x84, 0xbc, 0x25, 0xc7, 0x02, 0x4b, 0xa3, 0xb7, 0x5c, + 0xfa, 0xaa, 0xea, 0xc8, 0xb1, 0xc0, 0xc6, 0x6e, 0xe2, 0x4b, 0x3f, 0x21, 0x26, 0xc6, 0xb6, 0x03, + 0x55, 0xe1, 0x28, 0x61, 0xea, 0x19, 0x39, 0xbc, 0xed, 0x88, 0xa1, 0x40, 0x46, 0x14, 0x53, 0x88, + 0xe0, 0xd0, 0xfa, 0x06, 0x74, 0x5c, 0xdf, 0x47, 0xf3, 0x44, 0xe8, 0xf5, 0xcf, 0x02, 0x3f, 0x45, + 0x4e, 0x15, 0x5c, 0x5c, 0x40, 0xed, 0x5d, 0xb0, 0xcc, 0x80, 0xa2, 0x38, 0xfb, 0x75, 0x49, 0x27, + 0x84, 0xce, 0x93, 0x75, 0xd1, 0xf6, 0xfd, 0x42, 0xf5, 0x28, 0xcb, 0xb8, 0xda, 0xc9, 0x32, 0x24, + 0xdf, 0x6d, 0x16, 
0x94, 0xa5, 0xa4, 0xac, 0xac, 0x4a, 0xca, 0x1e, 0x74, 0x97, 0x75, 0x20, 0x05, + 0x3d, 0x38, 0x38, 0x66, 0x13, 0xf6, 0x2e, 0xfa, 0xa1, 0x25, 0x43, 0x17, 0x4b, 0x87, 0x4a, 0x38, + 0x39, 0x7e, 0x77, 0x05, 0x96, 0x85, 0x90, 0x02, 0x27, 0xb0, 0xf7, 0x3a, 0x48, 0xf9, 0xdd, 0xe2, + 0x97, 0x44, 0x95, 0x57, 0x89, 0xfa, 0x15, 0x40, 0xce, 0x4a, 0xab, 0x5c, 0x32, 0x54, 0x46, 0x8c, + 0xcd, 0x03, 0x4e, 0x09, 0x2d, 0xc7, 0xc2, 0xed, 0xdc, 0x8b, 0xe9, 0x8e, 0x11, 0x43, 0x51, 0x10, + 0x67, 0x61, 0x30, 0x3f, 0x8d, 0xbc, 0x4b, 0xc6, 0x53, 0x59, 0xb0, 0xb1, 0x98, 0x1a, 0x90, 0xcc, + 0xca, 0x31, 0x9b, 0x4c, 0x64, 0xd5, 0xde, 0x74, 0xd4, 0x04, 0x8f, 0xb3, 0xbf, 0x78, 0x1c, 0xaa, + 0x7f, 0x4f, 0xa1, 0x99, 0xab, 0x9a, 0xa2, 0x4a, 0x95, 0xd5, 0xfe, 0x35, 0xa9, 0xec, 0x87, 0xd0, + 0x3a, 0xe5, 0xe8, 0xb9, 0x35, 0x46, 0xb1, 0x1f, 0x43, 0x47, 0x17, 0x4f, 0x49, 0xa8, 0xd2, 0xdf, + 0xe5, 0xb3, 0x94, 0xa8, 0x68, 0x66, 0xff, 0xb5, 0x02, 0x1b, 0x14, 0x9d, 0x59, 0x89, 0x29, 0xe5, + 0x25, 0xe6, 0x7f, 0x52, 0xe9, 0x3e, 0x80, 0x46, 0x7a, 0x93, 0x72, 0x36, 0x1d, 0x50, 0xbd, 0x6b, + 0x3b, 0x39, 0xf0, 0xff, 0xaa, 0x97, 0x57, 0xbd, 0xbf, 0x95, 0xa0, 0xa1, 0xdd, 0xfc, 0x1f, 0x77, + 0x25, 0x1f, 0x43, 0x23, 0x56, 0x8e, 0x67, 0xaa, 0x78, 0x35, 0xfb, 0x1d, 0x12, 0x94, 0x95, 0xab, + 0x9c, 0xc0, 0x88, 0x9f, 0xaa, 0x19, 0x3f, 0x46, 0xd7, 0x51, 0x2b, 0x74, 0x1d, 0xe8, 0xfc, 0x58, + 0x54, 0xc5, 0xba, 0xac, 0x8a, 0x72, 0x6c, 0xf6, 0x19, 0x1b, 0x85, 0x3e, 0xc3, 0xfe, 0x14, 0x36, + 0x4e, 0x5c, 0x6f, 0x8c, 0xe7, 0x10, 0x1b, 0xbd, 0x98, 0xc2, 0x14, 0x37, 0x8a, 0xb1, 0x10, 0x32, + 0x65, 0x68, 0xef, 0x1b, 0x2a, 0xe1, 0x34, 0xb3, 0x2f, 0xb1, 0x17, 0x50, 0x69, 0x40, 0xc9, 0xf4, + 0x04, 0x6b, 0x65, 0x66, 0x90, 0x2c, 0x97, 0x96, 0xbb, 0x09, 0x83, 0x06, 0xdd, 0xb2, 0x31, 0x55, + 0x92, 0xa9, 0xb4, 0x66, 0x36, 0x20, 0x7d, 0x9c, 0x6c, 0xd9, 0xfe, 0x6d, 0x09, 0xf6, 0x55, 0xab, + 0x78, 0x67, 0x43, 0xb8, 0xba, 0x05, 0x51, 0xe6, 0xab, 0x14, 0xcc, 0xf7, 0x14, 0x1a, 0x09, 0x4b, + 0xa3, 0x59, 0x82, 0x66, 0x96, 0x96, 0x6d, 0xf6, 0xf7, 0xb2, 0x4c, 0x92, 0xb2, 0x1c, 0x5a, 0x75, + 0x72, 0x3a, 0xfb, 0x0f, 0x15, 0xe8, 0x14, 0x57, 0x45, 0x5d, 0xba, 0x98, 0x5c, 0x06, 0xd1, 0x97, + 0xaa, 0xc7, 0x55, 0xc6, 0x33, 0x21, 0x91, 0x55, 0x68, 0xcb, 0x53, 0xbc, 0xe8, 0x50, 0x92, 0xba, + 0xc8, 0x72, 0x80, 0x56, 0x07, 0x2c, 0x09, 0x22, 0x9f, 0xba, 0xa4, 0x1c, 0x10, 0x65, 0x00, 0x27, + 0x6f, 0x66, 0x11, 0x77, 0xa9, 0xab, 0xd6, 0x73, 0xd9, 0xdc, 0xa2, 0x8f, 0x18, 0x3f, 0x12, 0x5e, + 0xab, 0x51, 0x73, 0xab, 0x91, 0x7c, 0xfd, 0x84, 0x4d, 0x53, 0x4a, 0x73, 0x03, 0x11, 0x9a, 0x2b, + 0x6f, 0xbe, 0x16, 0x41, 0x4d, 0xf9, 0x6e, 0x42, 0x82, 0x83, 0x9a, 0x9e, 0x5e, 0xbb, 0xb1, 0x4c, + 0xfb, 0xb6, 0x63, 0x20, 0x18, 0xc8, 0x3b, 0x6a, 0x86, 0xd6, 0xc0, 0x4f, 0x19, 0x57, 0xdc, 0xbe, + 0xb2, 0x0c, 0xb4, 0x9d, 0xe5, 0x05, 0x41, 0x7d, 0xc9, 0x92, 0x90, 0x4d, 0x4e, 0x0c, 0xa9, 0xa0, + 0xa8, 0x97, 0x16, 0xac, 0x3e, 0xec, 0x2a, 0xf0, 0xec, 0x68, 0x60, 0x6e, 0x68, 0xca, 0x0d, 0x2b, + 0xd7, 0xc4, 0x07, 0xd7, 0x52, 0x9c, 0xd0, 0xad, 0xf6, 0x5d, 0x68, 0xbf, 0xbc, 0x62, 0x58, 0xc1, + 0xb3, 0xc8, 0x41, 0xbb, 0x8b, 0x04, 0xc0, 0x68, 0x98, 0xc6, 0xd2, 0x6b, 0x55, 0x27, 0x07, 0xec, + 0x14, 0x6a, 0x92, 0x7c, 0x65, 0x57, 0xa3, 0x82, 0xae, 0xac, 0x83, 0xae, 0x18, 0x62, 0x6d, 0x1d, + 0x62, 0x14, 0x8c, 0xd5, 0x3c, 0x18, 0x0b, 0x42, 0x6b, 0x8b, 0x42, 0x7f, 0x57, 0x86, 0xd6, 0x17, + 0x8c, 0x5f, 0x47, 0xc9, 0xa5, 0x48, 0xae, 0x74, 0xe5, 0x6d, 0x79, 0x1f, 0xbf, 0xed, 0xe6, 0xc3, + 0x8b, 0x1b, 0x4e, 0xc1, 0x54, 0xc5, 0x5c, 0x9e, 0xbf, 0x10, 0x53, 0xeb, 0x01, 0x00, 0x2e, 0x0d, + 0x5c, 0x75, 0x43, 0xaa, 0xfe, 0xaa, 0x91, 
0xcc, 0x09, 0xb0, 0xde, 0x87, 0x86, 0x33, 0x1f, 0x62, + 0x0d, 0x8e, 0x12, 0x15, 0xf1, 0x55, 0xfc, 0x2c, 0x9c, 0xbf, 0x94, 0x73, 0xb1, 0x17, 0x17, 0xfd, + 0x24, 0x8a, 0x63, 0xe6, 0x67, 0xaa, 0x25, 0xf3, 0x63, 0x05, 0x08, 0xa9, 0x67, 0x99, 0xd4, 0xba, + 0x92, 0xca, 0x73, 0xa9, 0xb8, 0x14, 0x93, 0xd4, 0x0d, 0x3a, 0x94, 0x29, 0xf5, 0x4c, 0x4b, 0xdd, + 0x54, 0x52, 0xb9, 0x21, 0xf5, 0x2c, 0x97, 0xda, 0xc8, 0xf6, 0x92, 0x54, 0xfb, 0xcf, 0x25, 0xd8, + 0xc4, 0x50, 0x3e, 0x4f, 0xdd, 0x11, 0xc3, 0x5b, 0xaf, 0xc9, 0x31, 0xec, 0x27, 0xc3, 0x99, 0x98, + 0x92, 0xcb, 0x40, 0x42, 0x8a, 0xe0, 0x6b, 0xd0, 0x8a, 0x59, 0x82, 0x01, 0x4e, 0x14, 0x65, 0x2c, + 0x42, 0x55, 0xa7, 0xa9, 0x30, 0x45, 0x72, 0x08, 0xef, 0xc9, 0xb5, 0x61, 0x10, 0x0e, 0x55, 0x04, + 0x4d, 0x23, 0x9f, 0x91, 0xa9, 0x76, 0xe4, 0xd2, 0xab, 0xf0, 0x73, 0xbd, 0x60, 0x7d, 0x1b, 0x76, + 0x34, 0xbd, 0xb8, 0x59, 0x25, 0xb5, 0x32, 0xdd, 0x16, 0x51, 0x9f, 0x13, 0x8c, 0x8d, 0x4e, 0xe7, + 0x6c, 0x8c, 0x9f, 0xff, 0x1c, 0xaf, 0x9e, 0xd1, 0xb1, 0x8b, 0x09, 0x8a, 0x55, 0x37, 0x96, 0x69, + 0x9c, 0x92, 0xb6, 0xd9, 0xd4, 0xfa, 0x0e, 0xec, 0x70, 0x45, 0xcb, 0xfc, 0x61, 0x46, 0xa3, 0xbc, + 0xb9, 0xad, 0x17, 0x06, 0x44, 0xfc, 0x75, 0xe8, 0xe4, 0xc4, 0xb2, 0x86, 0x2b, 0x7d, 0xdb, 0x1a, + 0x3d, 0x13, 0x95, 0xfc, 0xf7, 0xca, 0x58, 0x2a, 0x72, 0x3e, 0x96, 0x55, 0xc5, 0x30, 0x55, 0xb3, + 0xbf, 0x95, 0x55, 0x63, 0x32, 0x86, 0xac, 0x24, 0xca, 0x2c, 0x3f, 0x86, 0x2d, 0xae, 0x55, 0x1f, + 0x62, 0x02, 0xb9, 0x54, 0x92, 0xb3, 0x8a, 0x58, 0x3c, 0x98, 0xd3, 0xe1, 0xc5, 0x83, 0xa2, 0xe5, + 0x55, 0x9b, 0x40, 0x02, 0x95, 0x7e, 0x4d, 0x85, 0x49, 0x11, 0xf6, 0x8f, 0xa0, 0x81, 0x3d, 0x44, + 0xaa, 0xb4, 0x43, 0xc3, 0x78, 0xb3, 0x24, 0xc1, 0xfc, 0xca, 0x0c, 0x43, 0x53, 0xd1, 0x63, 0xc8, + 0x2b, 0x96, 0x8c, 0xa1, 0x26, 0x76, 0x04, 0xa0, 0xd2, 0x5c, 0x4a, 0x43, 0x1a, 0x33, 0x04, 0xd4, + 0x44, 0xc4, 0xd9, 0xd4, 0x9d, 0x6b, 0xd7, 0xcb, 0x38, 0x43, 0x40, 0x1d, 0x10, 0x05, 0xbe, 0x75, + 0x83, 0x89, 0x47, 0x8f, 0x00, 0x28, 0x90, 0xa6, 0xb9, 0xc0, 0xaa, 0x29, 0xf0, 0x4f, 0x65, 0x68, + 0x2a, 0x89, 0x4a, 0x61, 0xa4, 0xf2, 0xf0, 0x32, 0xd2, 0x22, 0xe5, 0x04, 0xdb, 0x85, 0x5a, 0x2e, + 0x2e, 0x6f, 0x1d, 0x73, 0x55, 0x33, 0xdd, 0xf0, 0x72, 0x4c, 0xb1, 0x5e, 0x1a, 0xd6, 0x59, 0x49, + 0xdd, 0x10, 0x44, 0x4a, 0xe1, 0x4f, 0xa0, 0xa5, 0xe2, 0x93, 0xf6, 0x54, 0xd7, 0xed, 0x69, 0x2a, + 0x32, 0xb5, 0xeb, 0xa9, 0xe8, 0xd0, 0x50, 0x5f, 0xd9, 0x11, 0x34, 0xfb, 0x0f, 0x0a, 0xe4, 0xf2, + 0x24, 0x87, 0xf2, 0xf7, 0x65, 0xc8, 0xb1, 0x34, 0x2b, 0xda, 0xde, 0x33, 0x80, 0x1c, 0x14, 0x35, + 0xeb, 0x92, 0xdd, 0x64, 0x9d, 0x28, 0x0e, 0xc5, 0xd9, 0xaf, 0xdc, 0xc9, 0x2c, 0x33, 0xaa, 0x9a, + 0xfc, 0xb0, 0xfc, 0xac, 0x84, 0x9f, 0x2a, 0x5b, 0x2f, 0xc4, 0x3d, 0x67, 0x6c, 0x2f, 0xbc, 0x5d, + 0x55, 0x57, 0xbe, 0x5d, 0x55, 0xb3, 0xb7, 0x2b, 0x2c, 0xa3, 0x51, 0x4c, 0xb7, 0x32, 0x8e, 0x72, + 0x41, 0x55, 0x43, 0x90, 0xfd, 0x8f, 0x2a, 0x40, 0x2e, 0xc5, 0x3a, 0x85, 0x5e, 0x10, 0x0d, 0xc5, + 0xa5, 0x12, 0x78, 0x4c, 0x15, 0xa4, 0x61, 0xc2, 0x30, 0x7c, 0xd2, 0xe0, 0x8a, 0x51, 0xdf, 0xb1, + 0x4f, 0xe7, 0x5e, 0x50, 0xce, 0x39, 0xc0, 0x99, 0xda, 0x28, 0x2b, 0x97, 0x93, 0x6d, 0xb3, 0x7e, + 0x06, 0x7b, 0x39, 0x53, 0xdf, 0xe0, 0x57, 0xbe, 0x95, 0xdf, 0x7b, 0x9a, 0x9f, 0x9f, 0xf3, 0xfa, + 0x09, 0x20, 0x3c, 0xc4, 0x3b, 0x66, 0x56, 0xe0, 0x54, 0xb9, 0x95, 0xd3, 0x4e, 0x10, 0xbd, 0x91, + 0x3b, 0x72, 0x3e, 0x6f, 0xe0, 0xbe, 0x71, 0x50, 0x91, 0xf6, 0x06, 0xb7, 0xea, 0xad, 0xdc, 0xf6, + 0xb5, 0x5e, 0xa2, 0x30, 0xe4, 0x2c, 0x3f, 0x07, 0x5c, 0x19, 0x5e, 0xbb, 0x01, 0x5f, 0xe4, 0x57, + 0xbb, 0xeb, 0x9c, 0x5f, 0xe2, 0xa6, 0x22, 0x33, 0x75, 0xce, 0x29, 
0x4b, 0x46, 0x85, 0x73, 0xd6, + 0xef, 0x3a, 0xe7, 0x89, 0xdc, 0x91, 0xf3, 0x79, 0x01, 0x08, 0x2e, 0xea, 0xb3, 0x71, 0x2b, 0x97, + 0xad, 0x20, 0x2a, 0xea, 0x72, 0x04, 0x3b, 0x29, 0xf3, 0x38, 0xde, 0x28, 0x06, 0x8f, 0xcd, 0x5b, + 0x79, 0x6c, 0xd3, 0x06, 0xcd, 0xc4, 0xfe, 0x0a, 0x5a, 0x3f, 0x9d, 0x8d, 0x18, 0x9f, 0x5c, 0xe8, + 0x9c, 0xff, 0x6f, 0x97, 0x99, 0x7f, 0x61, 0x99, 0x39, 0x1a, 0x25, 0xd1, 0x2c, 0x2e, 0x54, 0x6d, + 0x95, 0xc3, 0x4b, 0x55, 0x5b, 0xd2, 0xc8, 0xaa, 0xad, 0xa8, 0x3f, 0x85, 0x96, 0x6a, 0xb2, 0x68, + 0x83, 0xaa, 0x42, 0xd6, 0x72, 0xd2, 0x67, 0x4d, 0x9d, 0xda, 0xd6, 0xa7, 0x86, 0x95, 0x76, 0x15, + 0xab, 0x51, 0x6e, 0x26, 0xfc, 0x62, 0xc9, 0xb3, 0xee, 0x15, 0xb4, 0xc7, 0xca, 0x36, 0xb4, 0x4b, + 0x05, 0xe0, 0x47, 0x99, 0x72, 0xf9, 0x19, 0x0e, 0x4d, 0x1b, 0x2a, 0x53, 0xb7, 0xc6, 0xa6, 0x59, + 0xbf, 0x07, 0x20, 0x3e, 0x49, 0x86, 0x59, 0xa1, 0x32, 0x9f, 0x1d, 0xf5, 0x0d, 0x81, 0xdf, 0x3f, + 0xd9, 0xb0, 0x77, 0x06, 0x3b, 0x4b, 0x3c, 0x57, 0x94, 0xa9, 0x6f, 0x99, 0x65, 0xaa, 0xd9, 0x7f, + 0x8f, 0x58, 0x9a, 0x5b, 0xcd, 0xda, 0xf5, 0xc7, 0x92, 0xfa, 0x82, 0xd1, 0x2f, 0x43, 0xd6, 0x33, + 0x68, 0x87, 0xaa, 0xf9, 0xd2, 0x0e, 0xa8, 0x18, 0x8c, 0xcc, 0xc6, 0xcc, 0x69, 0x85, 0x66, 0x9b, + 0x86, 0x8e, 0xf0, 0xa4, 0x05, 0x56, 0x3a, 0xc2, 0x30, 0x8e, 0xd3, 0xf4, 0x0c, 0x6f, 0x17, 0x9a, + 0xc1, 0xca, 0x62, 0x33, 0x48, 0x0f, 0x0d, 0xeb, 0x9e, 0x42, 0xfb, 0xff, 0xac, 0x43, 0xe5, 0xf9, + 0xe0, 0x95, 0x75, 0x0e, 0xdb, 0x8b, 0xff, 0x24, 0x58, 0x0f, 0x49, 0xf4, 0x9a, 0x7f, 0x1f, 0x7a, + 0x1f, 0xae, 0x5d, 0xa7, 0x6e, 0xf9, 0x9e, 0xe5, 0xc0, 0xd6, 0xc2, 0xbb, 0xb1, 0x95, 0x5d, 0x27, + 0xab, 0xdf, 0xe6, 0x7b, 0x0f, 0xd7, 0x2d, 0x9b, 0x3c, 0x17, 0xda, 0x73, 0xcd, 0x73, 0xf5, 0xe7, + 0x9d, 0xe6, 0xb9, 0xae, 0xab, 0xbf, 0x67, 0xfd, 0x00, 0xea, 0xea, 0x25, 0xd9, 0xda, 0x25, 0xda, + 0xc2, 0x1b, 0x75, 0x6f, 0x6f, 0x01, 0xd5, 0x1b, 0x5f, 0x43, 0xbb, 0xf0, 0xf7, 0x83, 0xf5, 0x7e, + 0x41, 0x56, 0xf1, 0x21, 0xba, 0xf7, 0xc1, 0xea, 0x45, 0xcd, 0xed, 0x08, 0x20, 0x7f, 0x6c, 0xb4, + 0xba, 0x44, 0xbd, 0xf4, 0xa0, 0xdd, 0xbb, 0xbf, 0x62, 0x45, 0x33, 0x41, 0x57, 0x2e, 0x3e, 0x0b, + 0x5a, 0x0b, 0x56, 0x5d, 0x7c, 0x94, 0xd3, 0xae, 0x5c, 0xfb, 0x9e, 0x28, 0xd9, 0x2e, 0x3e, 0xf6, + 0x69, 0xb6, 0x6b, 0x9e, 0x1a, 0x35, 0xdb, 0xb5, 0xaf, 0x84, 0xf7, 0xac, 0x9f, 0x43, 0xa7, 0xf8, + 0xb0, 0x66, 0x65, 0x46, 0x5a, 0xf9, 0x7c, 0xd8, 0x7b, 0xb0, 0x66, 0x55, 0x33, 0xfc, 0x04, 0x6a, + 0xea, 0xc5, 0x2c, 0x4b, 0x39, 0xf3, 0xa1, 0xad, 0xb7, 0x5b, 0x04, 0xf5, 0xae, 0x27, 0x50, 0x57, + 0x1f, 0x76, 0x3a, 0x00, 0x0a, 0xdf, 0x79, 0xbd, 0x96, 0x89, 0xda, 0xf7, 0x9e, 0x94, 0x32, 0x39, + 0x69, 0x41, 0x4e, 0xba, 0x4a, 0x8e, 0xe1, 0x9c, 0x8b, 0xba, 0xfc, 0x6b, 0xef, 0xe9, 0xbf, 0x03, + 0x00, 0x00, 0xff, 0xff, 0x58, 0xa9, 0x0a, 0x41, 0xe7, 0x1b, 0x00, 0x00, } diff --git a/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto b/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto index d6f56e1043..7ae3a3dd64 100644 --- a/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto +++ b/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto @@ -47,6 +47,9 @@ message CreateContainerRequest { string stderr = 6; // path to file where stderr will be written (optional) repeated string labels = 7; bool noPivotRoot = 8; + string runtime = 9; + repeated string runtimeArgs = 10; + string checkpointDir = 11; // Directory where checkpoints are stored } message CreateContainerResponse { @@ -98,6 +101,7 @@ message AddProcessResponse { message CreateCheckpointRequest { string id = 1; // ID of container Checkpoint checkpoint 
= 2; // Checkpoint configuration + string checkpointDir = 3; // Directory where checkpoints are stored } message CreateCheckpointResponse { @@ -106,6 +110,7 @@ message CreateCheckpointResponse { message DeleteCheckpointRequest { string id = 1; // ID of container string name = 2; // Name of checkpoint + string checkpointDir = 3; // Directory where checkpoints are stored } message DeleteCheckpointResponse { @@ -113,6 +118,7 @@ message DeleteCheckpointResponse { message ListCheckpointRequest { string id = 1; // ID of container + string checkpointDir = 2; // Directory where checkpoints are stored } message Checkpoint { @@ -193,6 +199,7 @@ message UpdateResource { uint32 memorySwap = 8; uint32 memoryReservation = 9; uint32 kernelMemoryLimit = 10; + uint32 kernelTCPMemoryLimit = 11; } message UpdateContainerResponse { diff --git a/vendor/src/github.com/golang/protobuf/proto/Makefile b/vendor/src/github.com/golang/protobuf/proto/Makefile index e2e0651a93..f1f06564a1 100644 --- a/vendor/src/github.com/golang/protobuf/proto/Makefile +++ b/vendor/src/github.com/golang/protobuf/proto/Makefile @@ -39,5 +39,5 @@ test: install generate-test-pbs generate-test-pbs: make install make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto make diff --git a/vendor/src/github.com/golang/protobuf/proto/decode.go b/vendor/src/github.com/golang/protobuf/proto/decode.go index f94b9f416e..5810782fd8 100644 --- a/vendor/src/github.com/golang/protobuf/proto/decode.go +++ b/vendor/src/github.com/golang/protobuf/proto/decode.go @@ -768,11 +768,10 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { } } keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") } v.SetMapIndex(keyelem, valelem) diff --git a/vendor/src/github.com/golang/protobuf/proto/encode.go b/vendor/src/github.com/golang/protobuf/proto/encode.go index eb7e0474ef..231b07401a 100644 --- a/vendor/src/github.com/golang/protobuf/proto/encode.go +++ b/vendor/src/github.com/golang/protobuf/proto/encode.go @@ -64,10 +64,6 @@ var ( // a struct with a repeated field containing a nil element. errRepeatedHasNil = errors.New("proto: repeated field has nil element") - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - // ErrNil is the error returned if Marshal is called with nil. ErrNil = errors.New("proto: Marshal called with nil") ) @@ -1226,9 +1222,7 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { // Do oneof fields. 
if prop.oneofMarshaler != nil { m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { + if err := prop.oneofMarshaler(m, o); err != nil { return err } } diff --git a/vendor/src/github.com/golang/protobuf/proto/text.go b/vendor/src/github.com/golang/protobuf/proto/text.go index 37c953570d..1cbaf86d39 100644 --- a/vendor/src/github.com/golang/protobuf/proto/text.go +++ b/vendor/src/github.com/golang/protobuf/proto/text.go @@ -175,93 +175,7 @@ type raw interface { Bytes() []byte } -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } +func writeStruct(w *textWriter, sv reflect.Value) error { st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -313,7 +227,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { } continue } - if err := tm.writeAny(w, v, props); err != nil { + if err := writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -355,7 +269,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + if err := writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -372,7 +286,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv 
reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { + if err := writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -444,7 +358,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { } // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { + if err := writeAny(w, fv, props); err != nil { return err } @@ -456,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() if pv.Type().Implements(extendableProtoType) { - if err := tm.writeExtensions(w, pv); err != nil { + if err := writeExtensions(w, pv); err != nil { return err } } @@ -486,7 +400,7 @@ func writeRaw(w *textWriter, b []byte) error { } // writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) // Floats have special cases. @@ -535,15 +449,15 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { + } else if err := writeStruct(w, v); err != nil { return err } w.unindent() @@ -687,7 +601,7 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { +func writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] ep := pv.Interface().(extendableProto) @@ -722,13 +636,13 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error // Repeated extensions will appear as a slice. if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { + if err := writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } @@ -737,7 +651,7 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error return nil } -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { +func writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } @@ -746,7 +660,7 @@ func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface return err } } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -773,13 +687,12 @@ func (w *textWriter) writeIndent() { // TextMarshaler is a configurable text format marshaler. type TextMarshaler struct { - Compact bool // use compact text format (one line). 
- ExpandAny bool // expand google.protobuf.Any messages of known types + Compact bool // use compact text format (one line). } // Marshal writes a given protocol buffer in text format. // The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { +func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -794,11 +707,11 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { aw := &textWriter{ w: ww, complete: true, - compact: tm.Compact, + compact: m.Compact, } - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() if err != nil { return err } @@ -812,7 +725,7 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { + if err := writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -822,9 +735,9 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { +func (m *TextMarshaler) Text(pb Message) string { var buf bytes.Buffer - tm.Marshal(&buf, pb) + m.Marshal(&buf, pb) return buf.String() } diff --git a/vendor/src/github.com/golang/protobuf/proto/text_parser.go b/vendor/src/github.com/golang/protobuf/proto/text_parser.go index b5e1c8e1f4..451323262c 100644 --- a/vendor/src/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/src/github.com/golang/protobuf/proto/text_parser.go @@ -163,7 +163,7 @@ func (p *textParser) advance() { p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + case '<', '>', '{', '}', ':', '[', ']', ';', ',': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -451,10 +451,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > + // "[extension]". for { tok := p.next() if tok.err != nil { @@ -464,66 +461,33 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { break } if tok.value == "[" { - // Looks like an extension or an Any. + // Looks like an extension. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err + tok = p.next() + if tok.err != nil { + return tok.err } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - continue - } - var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { + if d.Name == tok.value { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", extName) + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) } props := &Properties{} @@ -679,35 +643,6 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. func (p *textParser) consumeOptionalSeparator() error { diff --git a/vendor/src/golang.org/x/net/context/context.go b/vendor/src/golang.org/x/net/context/context.go index e7ee376c47..19235cf26a 100644 --- a/vendor/src/golang.org/x/net/context/context.go +++ b/vendor/src/golang.org/x/net/context/context.go @@ -189,7 +189,7 @@ func Background() Context { } // TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it's is not yet available (because the +// it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine // whether Contexts are propagated correctly in a program. @@ -210,13 +210,13 @@ type CancelFunc func() // call cancel as soon as the operations running in this Context complete. 
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } } // newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ Context: parent, done: make(chan struct{}), } @@ -259,7 +259,7 @@ func parentCancelCtx(parent Context) (*cancelCtx, bool) { case *cancelCtx: return c, true case *timerCtx: - return &c.cancelCtx, true + return c.cancelCtx, true case *valueCtx: parent = c.Context default: @@ -377,7 +377,7 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { // implement Done and Err. It implements cancel by stopping its timer then // delegating to cancelCtx.cancel. type timerCtx struct { - cancelCtx + *cancelCtx timer *time.Timer // Under cancelCtx.mu. deadline time.Time diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go index 48610e3627..e3170e3333 100644 --- a/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go +++ b/vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -9,6 +9,7 @@ package ctxhttp import "net/http" func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. ch := make(chan struct{}) req.Cancel = ch diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go index 504dd63ed9..a7ed8d8103 100644 --- a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -14,6 +14,14 @@ import ( "golang.org/x/net/context" ) +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + // Do sends an HTTP request with the provided http.Client and returns an HTTP response. // If the client is nil, http.DefaultClient is used. // If the context is canceled or times out, ctx.Err() will be returned. @@ -31,18 +39,51 @@ func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp } result := make(chan responseAndError, 1) + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + go func() { resp, err := client.Do(req) + testHookDoReturned() result <- responseAndError{resp, err} }() + var resp *http.Response + select { case <-ctx.Done(): + testHookContextDoneBeforeHeaders() cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() return nil, ctx.Err() case r := <-result: - return r.resp, r.err + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil } // Get issues a GET request via the Do function. 
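[Editorial aside, not part of the patch: a minimal usage sketch of the reworked ctxhttp.Do/Get above, assuming the vendored golang.org/x/net packages are on GOPATH; the URL and timeout are placeholder values. It shows the behaviour the diff adds — if the context expires first, Do returns ctx.Err() and closes the response body in a background goroutine; otherwise the body is wrapped in the notifyingReader so cancellation stops once the caller closes it.]

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// Placeholder timeout; any cancellation of ctx aborts the request.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Get builds the request and delegates to Do.
	resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		// err is ctx.Err() when the context won the race against the response.
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close() // also releases the cancellation goroutine via notifyingReader

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(body))
}

[End of aside; the patch continues below.]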
@@ -77,3 +118,28 @@ func Post(ctx context.Context, client *http.Client, url string, bodyType string, func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/src/golang.org/x/net/http2/Dockerfile b/vendor/src/golang.org/x/net/http2/Dockerfile index b4e14d55a5..53fc525797 100644 --- a/vendor/src/golang.org/x/net/http2/Dockerfile +++ b/vendor/src/golang.org/x/net/http2/Dockerfile @@ -17,8 +17,15 @@ RUN apt-get install -y --no-install-recommends \ libcunit1-dev libssl-dev libxml2-dev libevent-dev \ automake autoconf +# The list of packages nghttp2 recommends for h2load: +RUN apt-get install -y --no-install-recommends make binutils \ + autoconf automake autotools-dev \ + libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ + libev-dev libevent-dev libjansson-dev libjemalloc-dev \ + cython python3.4-dev python-setuptools + # Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER af24f8394e43f4 +ENV NGHTTP2_VER 895da9a RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git WORKDIR /root/nghttp2 @@ -31,9 +38,9 @@ RUN make RUN make install WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz -RUN tar -zxvf curl-7.40.0.tar.gz -WORKDIR /root/curl-7.40.0 +RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN tar -zxvf curl-7.45.0.tar.gz +WORKDIR /root/curl-7.45.0 RUN ./configure --with-ssl --with-nghttp2=/usr/local RUN make RUN make install diff --git a/vendor/src/golang.org/x/net/http2/buffer.go b/vendor/src/golang.org/x/net/http2/buffer.go deleted file mode 100644 index c43954cf04..0000000000 --- a/vendor/src/golang.org/x/net/http2/buffer.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "errors" -) - -// buffer is an io.ReadWriteCloser backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type buffer struct { - buf []byte - r, w int - closed bool - err error // err to return to reader -} - -var ( - errReadEmpty = errors.New("read from empty buffer") - errWriteClosed = errors.New("write on closed buffer") - errWriteFull = errors.New("write on full buffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *buffer) Read(p []byte) (n int, err error) { - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.closed && b.r == b.w { - err = b.err - } else if b.r == b.w && n == 0 { - err = errReadEmpty - } - return n, err -} - -// Len returns the number of bytes of the unread portion of the buffer. 
-func (b *buffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *buffer) Write(p []byte) (n int, err error) { - if b.closed { - return 0, errWriteClosed - } - - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} - -// Close marks the buffer as closed. Future calls to Write will -// return an error. Future calls to Read, once the buffer is -// empty, will return err. -func (b *buffer) Close(err error) { - if !b.closed { - b.closed = true - b.err = err - } -} diff --git a/vendor/src/golang.org/x/net/http2/client_conn_pool.go b/vendor/src/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 0000000000..772ea5e924 --- /dev/null +++ b/vendor/src/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,225 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// TODO: use singleflight for dialing and addConnCalls? +type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. 
+func (c *dialCall) dial(addr string) { + c.res, c.err = c.p.t.dialClientConn(addr) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. 
+ if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} diff --git a/vendor/src/golang.org/x/net/http2/configure_transport.go b/vendor/src/golang.org/x/net/http2/configure_transport.go new file mode 100644 index 0000000000..daa17f5d43 --- /dev/null +++ b/vendor/src/golang.org/x/net/http2/configure_transport.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "fmt" + "net/http" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr(authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// convering panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. +type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +type noDialH2RoundTripper struct{ t *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.t.RoundTrip(req) + if err == ErrNoCachedConn { + return nil, http.ErrSkipAltProtocol + } + return res, err +} diff --git a/vendor/src/golang.org/x/net/http2/errors.go b/vendor/src/golang.org/x/net/http2/errors.go index c885328a82..71a4e29056 100644 --- a/vendor/src/golang.org/x/net/http2/errors.go +++ b/vendor/src/golang.org/x/net/http2/errors.go @@ -1,11 +1,13 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package http2 -import "fmt" +import ( + "errors" + "fmt" +) // An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. type ErrCode uint32 @@ -76,3 +78,45 @@ func (e StreamError) Error() string { type goAwayFlowError struct{} func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connErrorReason wraps a ConnectionError with an informative error about why it occurs. + +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(ErrCodeProtocol). +type connError struct { + Code ErrCode + Reason string +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/src/golang.org/x/net/http2/fixed_buffer.go b/vendor/src/golang.org/x/net/http2/fixed_buffer.go new file mode 100644 index 0000000000..47da0f0bf7 --- /dev/null +++ b/vendor/src/golang.org/x/net/http2/fixed_buffer.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" +) + +// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. +// It never allocates, but moves old data as new data is written. +type fixedBuffer struct { + buf []byte + r, w int +} + +var ( + errReadEmpty = errors.New("read from empty fixedBuffer") + errWriteFull = errors.New("write on full fixedBuffer") +) + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *fixedBuffer) Read(p []byte) (n int, err error) { + if b.r == b.w { + return 0, errReadEmpty + } + n = copy(p, b.buf[b.r:b.w]) + b.r += n + if b.r == b.w { + b.r = 0 + b.w = 0 + } + return n, nil +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *fixedBuffer) Len() int { + return b.w - b.r +} + +// Write copies bytes from p into the buffer. +// It is an error to write more data than the buffer can hold. +func (b *fixedBuffer) Write(p []byte) (n int, err error) { + // Slide existing data to beginning. + if b.r > 0 && len(p) > len(b.buf)-b.w { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + // Write new data. 
+ n = copy(b.buf[b.w:], p) + b.w += n + if n < len(p) { + err = errWriteFull + } + return n, err +} diff --git a/vendor/src/golang.org/x/net/http2/flow.go b/vendor/src/golang.org/x/net/http2/flow.go index 540fc4283e..957de25420 100644 --- a/vendor/src/golang.org/x/net/http2/flow.go +++ b/vendor/src/golang.org/x/net/http2/flow.go @@ -1,7 +1,6 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Flow control diff --git a/vendor/src/golang.org/x/net/http2/frame.go b/vendor/src/golang.org/x/net/http2/frame.go index e8b872a19b..6943f93380 100644 --- a/vendor/src/golang.org/x/net/http2/frame.go +++ b/vendor/src/golang.org/x/net/http2/frame.go @@ -1,7 +1,6 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package http2 @@ -11,7 +10,11 @@ import ( "errors" "fmt" "io" + "log" + "strings" "sync" + + "golang.org/x/net/http2/hpack" ) const frameHeaderLen = 9 @@ -172,6 +175,12 @@ func (h FrameHeader) Header() FrameHeader { return h } func (h FrameHeader) String() string { var buf bytes.Buffer buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { buf.WriteString(h.Type.String()) if h.Flags != 0 { buf.WriteString(" flags=") @@ -188,15 +197,14 @@ func (h FrameHeader) String() string { if name != "" { buf.WriteString(name) } else { - fmt.Fprintf(&buf, "0x%x", 1<>16), byte(length>>8), byte(length)) + if logFrameWrites { + f.logWrite() + } + n, err := f.w.Write(f.wbuf) if err == nil && n != len(f.wbuf) { err = io.ErrShortWrite @@ -318,6 +365,24 @@ func (f *Framer) endWrite() error { return err } +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + log.Printf("http2: Framer %p: failed to decode just-written frame", f) + return + } + log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } @@ -333,8 +398,9 @@ const ( // NewFramer returns a Framer that writes frames to w and reads them from r. 
func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ - w: w, - r: r, + w: w, + r: r, + logReads: logFrameReads, } fr.getReadBuf = func(size uint32) []byte { if cap(fr.readBuf) >= int(size) { @@ -358,15 +424,39 @@ func (fr *Framer) SetMaxReadFrameSize(v uint32) { fr.maxReadSize = v } +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + // ErrFrameTooLarge is returned from Framer.ReadFrame when the peer // sends a frame that is larger than declared with SetMaxReadFrameSize. var ErrFrameTooLarge = errors.New("http2: frame too large") +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + // ReadFrame reads a single frame. The returned Frame is only valid // until the next call to ReadFrame. -// If the frame is larger than previously set with SetMaxReadFrameSize, -// the returned error is ErrFrameTooLarge. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from from the underlying +// reader. func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil if fr.lastFrame != nil { fr.lastFrame.invalidate() } @@ -383,12 +473,71 @@ func (fr *Framer) ReadFrame() (Frame, error) { } f, err := typeFrameParser(fh.Type)(fh, payload) if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } return nil, err } - fr.lastFrame = f + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } return f, nil } +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. 
+func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.1 @@ -417,7 +566,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // field is 0x0, the recipient MUST respond with a // connection error (Section 5.4.1) of type // PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } f := &DataFrame{ FrameHeader: fh, @@ -435,7 +584,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // length of the frame payload, the recipient MUST // treat this as a connection error. // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} } f.data = payload[:len(payload)-int(padSize)] return f, nil @@ -575,6 +724,8 @@ type PingFrame struct { Data [8]byte } +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) @@ -663,7 +814,7 @@ func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { // See http://http2.github.io/http2-spec/#rfc.section.6.9 type WindowUpdateFrame struct { FrameHeader - Increment uint32 + Increment uint32 // never read with high bit set } func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { @@ -740,7 +891,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. 
- return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} } var padLength uint8 if fh.Flags.Has(FlagHeadersPadded) { @@ -870,10 +1021,10 @@ func (p PriorityParam) IsZero() bool { func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } if len(payload) != 5 { - return nil, ConnectionError(ErrCodeFrameSize) + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} } v := binary.BigEndian.Uint32(payload[:4]) streamID := v & 0x7fffffff // mask off high bit @@ -943,13 +1094,12 @@ type ContinuationFrame struct { } func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } return &ContinuationFrame{fh, p}, nil } -func (f *ContinuationFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - func (f *ContinuationFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf @@ -1111,3 +1261,236 @@ type streamEnder interface { type headersEnder interface { HeadersEnded() bool } + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. 
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if !validHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing 
comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/src/golang.org/x/net/http2/go15.go b/vendor/src/golang.org/x/net/http2/go15.go new file mode 100644 index 0000000000..f0a5624141 --- /dev/null +++ b/vendor/src/golang.org/x/net/http2/go15.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package http2 + +import "net/http" + +func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel } diff --git a/vendor/src/golang.org/x/net/http2/gotrack.go b/vendor/src/golang.org/x/net/http2/gotrack.go index 7dc2ef90db..9933c9f8c7 100644 --- a/vendor/src/golang.org/x/net/http2/gotrack.go +++ b/vendor/src/golang.org/x/net/http2/gotrack.go @@ -1,9 +1,6 @@ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE // Defensive debug-only utility to track that functions run on the // goroutine that they're supposed to. diff --git a/vendor/src/golang.org/x/net/http2/headermap.go b/vendor/src/golang.org/x/net/http2/headermap.go index 67c7c48357..c2805f6ac4 100644 --- a/vendor/src/golang.org/x/net/http2/headermap.go +++ b/vendor/src/golang.org/x/net/http2/headermap.go @@ -1,9 +1,6 @@ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE package http2 @@ -60,6 +57,7 @@ func init() { "server", "set-cookie", "strict-transport-security", + "trailer", "transfer-encoding", "user-agent", "vary", diff --git a/vendor/src/golang.org/x/net/http2/hpack/encode.go b/vendor/src/golang.org/x/net/http2/hpack/encode.go index 19bd9f4fcb..f9bb033984 100644 --- a/vendor/src/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/src/golang.org/x/net/http2/hpack/encode.go @@ -1,7 +1,6 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package hpack @@ -145,7 +144,7 @@ func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { // shouldIndex reports whether f should be indexed. 
func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.size() <= e.dynTab.maxSize + return !f.Sensitive && f.Size() <= e.dynTab.maxSize } // appendIndexed appends index i, as encoded in "Indexed Header Field" diff --git a/vendor/src/golang.org/x/net/http2/hpack/hpack.go b/vendor/src/golang.org/x/net/http2/hpack/hpack.go index c9e36f7427..dcf257afa4 100644 --- a/vendor/src/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/src/golang.org/x/net/http2/hpack/hpack.go @@ -1,7 +1,6 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package hpack implements HPACK, a compression format for // efficiently representing HTTP header fields in the context of HTTP/2. @@ -42,7 +41,24 @@ type HeaderField struct { Sensitive bool } -func (hf *HeaderField) size() uint32 { +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid psuedo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7540 section 5.2. +func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. The size of an entry is the sum of its name's @@ -64,23 +80,65 @@ type Decoder struct { dynTab dynamicTable emit func(f HeaderField) + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + // buf is the unparsed buffer. It's only written to // saveBuf if it was truncated in the middle of a header // block. Because it's usually not owned, we can only // process it under Write. - buf []byte // usually not owned + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. saveBuf bytes.Buffer } -func NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder { +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { d := &Decoder{ - emit: emitFunc, + emit: emitFunc, + emitEnabled: true, } - d.dynTab.allowedMaxSize = maxSize - d.dynTab.setMaxSize(maxSize) + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) return d } +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. 
+// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + // TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their // underlying buffers for garbage reasons. @@ -122,7 +180,7 @@ func (dt *dynamicTable) setMaxSize(v uint32) { func (dt *dynamicTable) add(f HeaderField) { dt.ents = append(dt.ents, f) - dt.size += f.size() + dt.size += f.Size() dt.evict() } @@ -130,7 +188,7 @@ func (dt *dynamicTable) add(f HeaderField) { func (dt *dynamicTable) evict() { base := dt.ents // keep base pointer of slice for dt.size > dt.maxSize { - dt.size -= dt.ents[0].size() + dt.size -= dt.ents[0].Size() dt.ents = dt.ents[1:] } @@ -247,15 +305,23 @@ func (d *Decoder) Write(p []byte) (n int, err error) { for len(d.buf) > 0 { err = d.parseHeaderFieldRepr() - if err != nil { - if err == errNeedMore { - err = nil - d.saveBuf.Write(d.buf) + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. 
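Aside on the new hpack.Decoder knobs documented above (SetMaxStringLength, SetEmitEnabled, and the HeaderField helpers): a minimal usage sketch, not part of the vendored diff, assuming only the exported API shown in this hunk plus the package's existing Encoder and WriteField on the encode side.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a few header fields into an HPACK block.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "example/1.0"})

	// Decode it, enforcing the per-string limit described above.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s (size=%d, pseudo=%v)\n", f.String(), f.Size(), f.IsPseudo())
	})
	dec.SetMaxStringLength(1 << 10) // longer strings yield ErrStringLength
	if _, err := dec.Write(buf.Bytes()); err != nil {
		fmt.Println("decode error:", err)
	}
	if err := dec.Close(); err != nil {
		fmt.Println("close error:", err)
	}
}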
+ const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { break } } - return len(p), err } @@ -323,9 +389,8 @@ func (d *Decoder) parseFieldIndexed() error { if !ok { return DecodingError{InvalidIndexError(idx)} } - d.emit(HeaderField{Name: hf.Name, Value: hf.Value}) d.buf = buf - return nil + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) } // (same invariants and behavior as parseHeaderFieldRepr) @@ -337,6 +402,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { } var hf HeaderField + wantStr := d.emitEnabled || it.indexed() if nameIdx > 0 { ihf, ok := d.at(nameIdx) if !ok { @@ -344,12 +410,12 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { } hf.Name = ihf.Name } else { - hf.Name, buf, err = readString(buf) + hf.Name, buf, err = d.readString(buf, wantStr) if err != nil { return err } } - hf.Value, buf, err = readString(buf) + hf.Value, buf, err = d.readString(buf, wantStr) if err != nil { return err } @@ -358,7 +424,18 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { d.dynTab.add(hf) } hf.Sensitive = it.sensitive() - d.emit(hf) + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } return nil } @@ -420,7 +497,15 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { return 0, origP, errNeedMore } -func readString(p []byte) (s string, remain []byte, err error) { +// readString decodes an hpack string from p. +// +// wantStr is whether s will be used. If false, decompression and +// []byte->string garbage are skipped if s will be ignored +// anyway. This does mean that huffman decoding errors for non-indexed +// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server +// is returning an error anyway, and because they're not indexed, the error +// won't affect the decoding state. +func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { if len(p) == 0 { return "", p, errNeedMore } @@ -429,17 +514,29 @@ func readString(p []byte) (s string, remain []byte, err error) { if err != nil { return "", p, err } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } if uint64(len(p)) < strLen { return "", p, errNeedMore } if !isHuff { - return string(p[:strLen]), p[strLen:], nil + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil } - // TODO: optimize this garbage: - var buf bytes.Buffer - if _, err := HuffmanDecode(&buf, p[:strLen]); err != nil { - return "", nil, err + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC } - return buf.String(), p[strLen:], nil + return s, p[strLen:], nil } diff --git a/vendor/src/golang.org/x/net/http2/hpack/huffman.go b/vendor/src/golang.org/x/net/http2/hpack/huffman.go index 9fe76f68ee..eb4b1f05cd 100644 --- a/vendor/src/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/src/golang.org/x/net/http2/hpack/huffman.go @@ -1,12 +1,12 @@ -// Copyright 2014 The Go Authors. 
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package hpack import ( "bytes" + "errors" "io" "sync" ) @@ -22,15 +22,46 @@ func HuffmanDecode(w io.Writer, v []byte) (int, error) { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { n := rootHuffmanNode cur, nbits := uint(0), uint8(0) for _, b := range v { cur = cur<<8 | uint(b) nbits += 8 for nbits >= 8 { - n = n.children[byte(cur>>(nbits-8))] + idx := byte(cur >> (nbits - 8)) + n = n.children[idx] + if n == nil { + return ErrInvalidHuffman + } if n.children == nil { + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } buf.WriteByte(n.sym) nbits -= n.codeLen n = rootHuffmanNode @@ -48,7 +79,7 @@ func HuffmanDecode(w io.Writer, v []byte) (int, error) { nbits -= n.codeLen n = rootHuffmanNode } - return w.Write(buf.Bytes()) + return nil } type node struct { @@ -67,10 +98,10 @@ func newInternalNode() *node { var rootHuffmanNode = newInternalNode() func init() { + if len(huffmanCodes) != 256 { + panic("unexpected size") + } for i, code := range huffmanCodes { - if i > 255 { - panic("too many huffman codes") - } addDecoderNode(byte(i), code, huffmanCodeLen[i]) } } diff --git a/vendor/src/golang.org/x/net/http2/hpack/tables.go b/vendor/src/golang.org/x/net/http2/hpack/tables.go index f898e25126..b9283a0233 100644 --- a/vendor/src/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/src/golang.org/x/net/http2/hpack/tables.go @@ -1,7 +1,6 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
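For reference on the Huffman helpers added above (HuffmanDecodeToString and ErrInvalidHuffman): a small round-trip sketch, separate from the vendored diff, using the package's existing AppendHuffmanString for the encode side.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Huffman-encode a string, then decode it with the new helper.
	enc := hpack.AppendHuffmanString(nil, "www.example.com")
	s, err := hpack.HuffmanDecodeToString(enc)
	fmt.Println(s, err) // "www.example.com" <nil>

	// Bytes that don't follow the HTTP/2 Huffman code are now rejected
	// with an error instead of silently misdecoding.
	if _, err := hpack.HuffmanDecodeToString([]byte{0xff, 0xff, 0xff, 0xff}); err != nil {
		fmt.Println("rejected:", err)
	}
}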
package hpack @@ -10,7 +9,7 @@ func pair(name, value string) HeaderField { } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = []HeaderField{ +var staticTable = [...]HeaderField{ pair(":authority", ""), // index 1 (1-based) pair(":method", "GET"), pair(":method", "POST"), @@ -74,7 +73,7 @@ var staticTable = []HeaderField{ pair("www-authenticate", ""), } -var huffmanCodes = []uint32{ +var huffmanCodes = [256]uint32{ 0x1ff8, 0x7fffd8, 0xfffffe2, @@ -333,7 +332,7 @@ var huffmanCodes = []uint32{ 0x3ffffee, } -var huffmanCodeLen = []uint8{ +var huffmanCodeLen = [256]uint8{ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, diff --git a/vendor/src/golang.org/x/net/http2/http2.go b/vendor/src/golang.org/x/net/http2/http2.go index 35f9b26e28..9d0003123a 100644 --- a/vendor/src/golang.org/x/net/http2/http2.go +++ b/vendor/src/golang.org/x/net/http2/http2.go @@ -1,31 +1,51 @@ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE // Package http2 implements the HTTP/2 protocol. // -// This is a work in progress. This package is low-level and intended -// to be used directly by very few people. Most users will use it -// indirectly through integration with the net/http package. See -// ConfigureServer. That ConfigureServer call will likely be automatic -// or available via an empty import in the future. +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) // -// See http://http2.github.io/ +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. package http2 import ( "bufio" + "crypto/tls" + "errors" "fmt" "io" "net/http" + "os" + "sort" "strconv" + "strings" "sync" ) -var VerboseLogs = false +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} const ( // ClientPreface is the string that must be sent by new @@ -141,17 +161,63 @@ func (s SettingID) String() string { return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) } -func validHeader(v string) bool { +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validHeaderFieldName reports whether v is a valid header field name (key). +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validHeaderFieldName(v string) bool { if len(v) == 0 { return false } for _, r := range v { - // "Just as in HTTP/1.x, header field names are - // strings of ASCII characters that are compared in a - // case-insensitive fashion. However, header field - // names MUST be converted to lowercase prior to their - // encoding in HTTP/2. " - if r >= 127 || ('A' <= r && r <= 'Z') { + if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') { + return false + } + if !isTokenTable[byte(r)] { + return false + } + } + return true +} + +// validHeaderFieldValue reports whether v is a valid header field value. +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func validHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + if b := v[i]; b < ' ' && b != '\t' || b == 0x7f { return false } } @@ -247,3 +313,152 @@ func (w *bufferedWriter) Flush() error { w.bw = nil return err } + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC2616, section 4.4. 
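The validHeaderFieldValue rule above reduces to a single per-byte test; the following standalone sketch restates it for illustration (these helper names are invented here, not the package's unexported functions).

package main

import "fmt"

// allowedFieldValueByte mirrors the per-byte rule described above:
// visible ASCII, obs-text (0x80-0xFF), SP, and HTAB are allowed; other
// control characters (including NUL, CR, LF) and DEL are not.
func allowedFieldValueByte(b byte) bool {
	return !(b < ' ' && b != '\t' || b == 0x7f)
}

func validValue(v string) bool {
	for i := 0; i < len(v); i++ {
		if !allowedFieldValueByte(v[i]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validValue("text/html; charset=utf-8")) // true
	fmt.Println(validValue("evil\r\nInjected: header"))  // false: CR/LF rejected
}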
+func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owners, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} diff --git a/vendor/src/golang.org/x/net/http2/not_go15.go b/vendor/src/golang.org/x/net/http2/not_go15.go new file mode 100644 index 0000000000..d0fa5c8906 --- /dev/null +++ b/vendor/src/golang.org/x/net/http2/not_go15.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package http2 + +import "net/http" + +func requestCancel(req *http.Request) <-chan struct{} { return nil } diff --git a/vendor/src/golang.org/x/net/http2/not_go16.go b/vendor/src/golang.org/x/net/http2/not_go16.go new file mode 100644 index 0000000000..db53c5b8cb --- /dev/null +++ b/vendor/src/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
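sorterPool above, like hpack's bufPool and the errChanPool/writeDataPool added later in server.go, follows the standard sync.Pool reuse pattern; a generic sketch of that pattern, with an illustrative pool of its own rather than the package's.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool is a hypothetical example pool, analogous to the package's
// bufPool/sorterPool/errChanPool: New produces a fresh value only when
// the pool has nothing to hand out.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func render(name string) string {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()            // don't trust previous users, as the hpack code puts it
	defer bufPool.Put(buf) // hand it back for reuse
	fmt.Fprintf(buf, "hello, %s", name)
	return buf.String()
}

func main() {
	fmt.Println(render("http2"))
}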
+ +// +build !go1.6 + +package http2 + +import "net/http" + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} diff --git a/vendor/src/golang.org/x/net/http2/pipe.go b/vendor/src/golang.org/x/net/http2/pipe.go index ce9aad5336..69446e7a37 100644 --- a/vendor/src/golang.org/x/net/http2/pipe.go +++ b/vendor/src/golang.org/x/net/http2/pipe.go @@ -1,43 +1,147 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package http2 import ( + "errors" + "io" "sync" ) +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) type pipe struct { - b buffer - c sync.Cond - m sync.Mutex + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader } // Read waits until data is available and copies bytes // from the buffer into p. -func (r *pipe) Read(p []byte) (n int, err error) { - r.c.L.Lock() - defer r.c.L.Unlock() - for r.b.Len() == 0 && !r.b.closed { - r.c.Wait() +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + return 0, p.err + } + p.c.Wait() } - return r.b.Read(p) } +var errClosedPipeWrite = errors.New("write on closed buffer") + // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. -func (w *pipe) Write(p []byte) (n int, err error) { - w.c.L.Lock() - defer w.c.L.Unlock() - defer w.c.Signal() - return w.b.Write(p) +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + return p.b.Write(d) } -func (c *pipe) Close(err error) { - c.c.L.Lock() - defer c.c.L.Unlock() - defer c.c.Signal() - c.b.Close(err) +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. 
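The rewritten pipe type above is unexported, so it can't be used directly; the following simplified, standalone sketch (illustrative names, not the package's) shows the same pattern: a sync.Cond-guarded buffer whose close error is surfaced only after buffered data drains.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"sync"
)

// blockingBuffer is a simplified analogue of http2's pipe: Read blocks
// until data arrives or a close error is set, and the error is only
// returned once the buffer is empty.
type blockingBuffer struct {
	mu  sync.Mutex
	c   sync.Cond // lazily bound to &mu, as pipe does
	b   bytes.Buffer
	err error
}

func (p *blockingBuffer) Read(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	for {
		if p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			return 0, p.err
		}
		p.c.Wait()
	}
}

func (p *blockingBuffer) Write(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	defer p.c.Signal()
	if p.err != nil {
		return 0, fmt.Errorf("write on closed buffer")
	}
	return p.b.Write(d)
}

func (p *blockingBuffer) CloseWithError(err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	defer p.c.Signal()
	if p.err == nil {
		p.err = err
	}
}

func main() {
	var p blockingBuffer
	go func() {
		p.Write([]byte("body bytes"))
		p.CloseWithError(io.EOF)
	}()
	b, err := ioutil.ReadAll(&p)
	fmt.Printf("%q %v\n", b, err) // "body bytes" <nil>
}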
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec } diff --git a/vendor/src/golang.org/x/net/http2/server.go b/vendor/src/golang.org/x/net/http2/server.go index 99cc673cc2..5f62b49e49 100644 --- a/vendor/src/golang.org/x/net/http2/server.go +++ b/vendor/src/golang.org/x/net/http2/server.go @@ -1,16 +1,13 @@ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE // TODO: replace all <-sc.doneServing with reads from the stream's cw // instead, and make sure that on close we close all open // streams. then remove doneServing? -// TODO: finish GOAWAY support. Consider each incoming frame type and -// whether it should be ignored during a shutdown race. +// TODO: re-audit GOAWAY support. Consider each incoming frame type and +// whether it should be ignored during graceful shutdown. // TODO: disconnect idle clients. GFE seems to do 4 minutes. make // configurable? or maximum number of idle clients and remove the @@ -49,7 +46,11 @@ import ( "log" "net" "net/http" + "net/textproto" "net/url" + "os" + "reflect" + "runtime" "strconv" "strings" "sync" @@ -68,7 +69,8 @@ const ( var ( errClientDisconnected = errors.New("client disconnected") errClosedBody = errors.New("body closed by handler") - errStreamBroken = errors.New("http2: stream broken") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") ) var responseWriterStatePool = sync.Pool{ @@ -133,12 +135,33 @@ func (s *Server) maxConcurrentStreams() uint32 { // The configuration conf may be nil. // // ConfigureServer must be called before s begins serving. -func ConfigureServer(s *http.Server, conf *Server) { +func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } + if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256. 
+ const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + if cs == requiredCipher { + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + } } // Note: not setting MinVersion to tls.VersionTLS12, @@ -148,22 +171,7 @@ func ConfigureServer(s *http.Server, conf *Server) { // during next-proto selection, but using TLS <1.2 with // HTTP/2 is still the client's bug. - // Be sure we advertise tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - // at least. - // TODO: enable PreferServerCipherSuites? - if s.TLSConfig.CipherSuites != nil { - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - haveRequired := false - for _, v := range s.TLSConfig.CipherSuites { - if v == requiredCipher { - haveRequired = true - break - } - } - if !haveRequired { - s.TLSConfig.CipherSuites = append(s.TLSConfig.CipherSuites, requiredCipher) - } - } + s.TLSConfig.PreferServerCipherSuites = true haveNPN := false for _, p := range s.TLSConfig.NextProtos { @@ -186,28 +194,76 @@ func ConfigureServer(s *http.Server, conf *Server) { if testHookOnConn != nil { testHookOnConn() } - conf.handleConn(hs, c, h) + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) } s.TLSNextProto[NextProtoTLS] = protoHandler s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. + return nil } -func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. 
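ConfigureServer (which now returns an error) and the newly exported ServeConn/ServeConnOpts are the two entry points this hunk documents. A hedged usage sketch follows; the certificate paths are placeholders, and the ServeConn alternative is shown only as a comment.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over " + r.Proto))
	})
	srv := &http.Server{Addr: ":8443", Handler: handler}

	// Wire HTTP/2 into a net/http TLS server. Note the new error return:
	// a CipherSuites list that omits or mis-orders the required suite is
	// now reported instead of being silently patched.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}

	// Alternative (not run here): hand an accepted net.Conn straight to
	// the HTTP/2 server, e.g. from a custom listener:
	//   (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{
	//       BaseConfig: srv,
	//       Handler:    handler,
	//   })

	// cert.pem/key.pem are assumed to exist for this sketch.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}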
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc := &serverConn{ - srv: srv, - hs: hs, + srv: s, + hs: opts.baseConfig(), conn: c, remoteAddrStr: c.RemoteAddr().String(), bw: newBufferedWriter(c), - handler: h, + handler: opts.handler(), streams: make(map[uint32]*stream), - readFrameCh: make(chan frameAndGate), - readFrameErrCh: make(chan error, 1), // must be buffered for 1 + readFrameCh: make(chan readFrameResult), wantWriteFrameCh: make(chan frameWriteMsg, 8), - wroteFrameCh: make(chan struct{}, 1), // buffered; one send in reading goroutine - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), - advMaxStreams: srv.maxConcurrentStreams(), + advMaxStreams: s.maxConcurrentStreams(), writeSched: writeScheduler{ maxFrameSize: initialMaxFrameSize, }, @@ -219,13 +275,14 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField) fr := NewFramer(sc.bw, c) - fr.SetMaxReadFrameSize(srv.maxReadFrameSize()) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr - if tc, ok := c.(*tls.Conn); ok { + if tc, ok := c.(connectionStater); ok { sc.tlsState = new(tls.ConnectionState) *sc.tlsState = tc.ConnectionState() // 9.2 Use of TLS Features @@ -255,7 +312,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { // So for now, do nothing here again. } - if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." @@ -302,23 +359,13 @@ func isBadCipher(cipher uint16) bool { } func (sc *serverConn) rejectConn(err ErrCode, debug string) { - log.Printf("REJECTING conn: %v, %s", err, debug) + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. sc.framer.WriteGoAway(0, err, []byte(debug)) sc.bw.Flush() sc.conn.Close() } -// frameAndGates coordinates the readFrames and serve -// goroutines. Because the Framer interface only permits the most -// recently-read Frame from being accessed, the readFrames goroutine -// blocks until it has a frame, passes it to serve, and then waits for -// serve to be done with it before reading the next one. 
-type frameAndGate struct { - f Frame - g gate -} - type serverConn struct { // Immutable: srv *Server @@ -327,17 +374,15 @@ type serverConn struct { bw *bufferedWriter // writing to conn handler http.Handler framer *Framer - hpackDecoder *hpack.Decoder - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan frameAndGate // written by serverConn.readFrames - readFrameErrCh chan error - wantWriteFrameCh chan frameWriteMsg // from handlers -> serve - wroteFrameCh chan struct{} // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - testHookCh chan func() // code to run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan frameWriteMsg // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + testHookCh chan func(int) // code to run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string // Everything following is owned by the serve loop; use serveG.check(): @@ -353,9 +398,8 @@ type serverConn struct { streams map[uint32]*stream initialWindowSize int32 headerTableSize uint32 - maxHeaderListSize uint32 // zero means unknown (default) + peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - req requestParam // non-zero while reading request headers writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush writeSched writeScheduler @@ -364,24 +408,23 @@ type serverConn struct { goAwayCode ErrCode shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used + freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder } -// requestParam is the state of the next request, initialized over -// potentially several frames HEADERS + zero or more CONTINUATION -// frames. -type requestParam struct { - // stream is non-nil if we're reading (HEADER or CONTINUATION) - // frames for a request (but not DATA). - stream *stream - header http.Header - method, path string - scheme, authority string - sawRegularHeader bool // saw a non-pseudo header already - invalidHeader bool // an invalid header was seen +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) } // stream represents a stream. This is the minimal metadata needed by @@ -393,20 +436,27 @@ type requestParam struct { // responseWriter's state field. 
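The maxHeaderListSize helper above pads net/http's byte-based limit by 32 bytes per field for an assumed 10 fields; a quick restatement of that arithmetic with the net/http default value.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Mirrors serverConn.maxHeaderListSize: take Server.MaxHeaderBytes
	// (or the net/http default) and pad it by the 32-byte-per-field
	// overhead HTTP/2 assigns, assuming 10 typical header fields.
	n := http.DefaultMaxHeaderBytes // 1 MB by default
	const perFieldOverhead = 32
	const typicalHeaders = 10
	fmt.Println(uint32(n + typicalHeaders*perFieldOverhead)) // 1048896
}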
type stream struct { // immutable: + sc *serverConn id uint32 body *pipe // non-nil if expecting DATA frames cw closeWaiter // closed wait stream transitions to closed state // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - weight uint8 - state streamState - sentReset bool // only true once detached from streams map - gotReset bool // only true once detacted from streams map + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + sentReset bool // only true once detached from streams map + gotReset bool // only true once detacted from streams map + gotTrailerHeader bool // HEADER frame for trailers was seen + reqBuf []byte + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer } func (sc *serverConn) Framer() *Framer { return sc.framer } @@ -434,6 +484,15 @@ func (sc *serverConn) state(streamID uint32) (streamState, *stream) { return stateIdle, nil } +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + func (sc *serverConn) vlogf(format string, args ...interface{}) { if VerboseLogs { sc.logf(format, args...) @@ -448,12 +507,55 @@ func (sc *serverConn) logf(format string, args ...interface{}) { } } +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. 
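The errno helper above pulls the numeric value out of a syscall error via reflection so the single bundled file builds on every OS; a small standalone demonstration of that trick (the wrapper name here is invented).

package main

import (
	"fmt"
	"reflect"
	"syscall"
)

// errnoOf mirrors the errno helper above: if the error's underlying
// kind is uintptr (as syscall.Errno is), return that number, else 0.
func errnoOf(v error) uintptr {
	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
		return uintptr(rv.Uint())
	}
	return 0
}

func main() {
	var err error = syscall.ECONNRESET
	fmt.Println(errnoOf(err) != 0)                 // true: Errno is a uintptr
	fmt.Println(errnoOf(fmt.Errorf("plain")) == 0) // true: not a syscall error
}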
+ if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } - str := err.Error() - if err == io.EOF || strings.Contains(str, "use of closed network connection") { + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { // Boring, expected errors. sc.vlogf(format, args...) } else { @@ -461,57 +563,6 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } -func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) { - sc.serveG.check() - sc.vlogf("got header field %+v", f) - switch { - case !validHeader(f.Name): - sc.req.invalidHeader = true - case strings.HasPrefix(f.Name, ":"): - if sc.req.sawRegularHeader { - sc.logf("pseudo-header after regular header") - sc.req.invalidHeader = true - return - } - var dst *string - switch f.Name { - case ":method": - dst = &sc.req.method - case ":path": - dst = &sc.req.path - case ":scheme": - dst = &sc.req.scheme - case ":authority": - dst = &sc.req.authority - default: - // 8.1.2.1 Pseudo-Header Fields - // "Endpoints MUST treat a request or response - // that contains undefined or invalid - // pseudo-header fields as malformed (Section - // 8.1.2.6)." - sc.logf("invalid pseudo-header %q", f.Name) - sc.req.invalidHeader = true - return - } - if *dst != "" { - sc.logf("duplicate pseudo-header %q sent", f.Name) - sc.req.invalidHeader = true - return - } - *dst = f.Value - case f.Name == "cookie": - sc.req.sawRegularHeader = true - if s, ok := sc.req.header["Cookie"]; ok && len(s) == 1 { - s[0] = s[0] + "; " + f.Value - } else { - sc.req.header.Add("Cookie", f.Value) - } - default: - sc.req.sawRegularHeader = true - sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value) - } -} - func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := commonCanonHeader[v] @@ -530,41 +581,54 @@ func (sc *serverConn) canonicalHeader(v string) string { return cv } +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + // readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - g := make(gate, 1) + gate := make(gate) + gateDone := gate.Done for { f, err := sc.framer.ReadFrame() - if err != nil { - sc.readFrameErrCh <- err - close(sc.readFrameCh) + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { return } - sc.readFrameCh <- frameAndGate{f, g} - // We can't read another frame until this one is - // processed, as the ReadFrame interface doesn't copy - // memory. The Frame accessor methods access the last - // frame's (shared) buffer. 
So we wait for the - // serve goroutine to tell us it's done: - g.Wait() } } +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wm frameWriteMsg // what was written (or attempted) + err error // result of the writeFrame call +} + // writeFrameAsync runs in its own goroutine and writes a single frame // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { err := wm.write.writeFrame(sc) - if ch := wm.done; ch != nil { - select { - case ch <- err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) - } - } - sc.wroteFrameCh <- struct{}{} // tickle frame selection scheduler + sc.wroteFrameCh <- frameWriteResult{wm, err} } func (sc *serverConn) closeAllStreamsOnConnClose() { @@ -582,6 +646,7 @@ func (sc *serverConn) stopShutdownTimer() { } func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. if testHookOnPanicMu != nil { testHookOnPanicMu.Lock() defer testHookOnPanicMu.Unlock() @@ -603,12 +668,15 @@ func (sc *serverConn) serve() { defer sc.stopShutdownTimer() defer close(sc.doneServing) // unblocks handlers trying to send - sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } sc.writeFrame(frameWriteMsg{ write: writeSettings{ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, // TODO: more actual settings, notably // SettingInitialWindowSize, but then we also @@ -619,30 +687,32 @@ func (sc *serverConn) serve() { sc.unackedSettings++ if err := sc.readPreface(); err != nil { - sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. 
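The StateActive/StateIdle transitions noted above are delivered through the standard net/http ConnState hook (via setConnState); a sketch of observing them from user code, assuming the HTTP/2 and TLS setup from the earlier ConfigureServer example.

package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	srv := &http.Server{
		Addr:    ":8443",
		Handler: http.DefaultServeMux,
		// ConnState fires for the transitions performed above: StateNew
		// (from net/http itself), StateActive once the client preface is
		// read and again per HEADERS, StateIdle between requests, and
		// StateClosed on teardown. StateHijacked is never reported for
		// HTTP/2 connections.
		ConnState: func(c net.Conn, state http.ConnState) {
			log.Printf("%v: %v", c.RemoteAddr(), state)
		},
	}
	// HTTP/2 wiring and TLS material are assumed, as in the earlier sketch.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}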
+ sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := time.NewTimer(firstSettingsTimeout) + loopNum := 0 for { + loopNum++ select { case wm := <-sc.wantWriteFrameCh: sc.writeFrame(wm) - case <-sc.wroteFrameCh: - if sc.writingFrame != true { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - sc.scheduleFrameWrite() - case fg, ok := <-sc.readFrameCh: - if !ok { - sc.readFrameCh = nil - } - if !sc.processFrameFromReader(fg, ok) { + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { return } + res.readMore() if settingsTimer.C != nil { settingsTimer.Stop() settingsTimer.C = nil @@ -656,7 +726,7 @@ func (sc *serverConn) serve() { sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return case fn := <-sc.testHookCh: - fn() + fn(loopNum) } } } @@ -683,38 +753,62 @@ func (sc *serverConn) readPreface() error { return errors.New("timeout waiting for client preface") case err := <-errc: if err == nil { - sc.vlogf("client %v said hello", sc.conn.RemoteAddr()) + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } } return err } } -// writeDataFromHandler writes the data described in req to stream.id. -// -// The provided ch is used to avoid allocating new channels for each -// write operation. It's expected that the caller reuses writeData and ch -// over time. -// -// The flow control currently happens in the Handler where it waits -// for 1 or more bytes to be available to then write here. So at this -// point we know that we have flow control. But this might have to -// change when priority is implemented, so the serve goroutine knows -// the total amount of bytes waiting to be sent and can can have more -// scheduling decisions available. -func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData, ch chan error) error { - sc.writeFrameFromHandler(frameWriteMsg{ - write: writeData, +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(frameWriteMsg{ + write: writeArg, stream: stream, done: ch, }) - select { - case err := <-ch: + if err != nil { return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true case <-sc.doneServing: return errClientDisconnected case <-stream.cw: - return errStreamBroken + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. 
+ select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err } // writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts @@ -724,12 +818,15 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData, // deadlock writing to sc.wantWriteFrameCh (which is only mildly // buffered and is read by serve itself). If you're on the serve // goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) { +func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { sc.serveG.checkNotOn() // NOT select { case sc.wantWriteFrameCh <- wm: + return nil case <-sc.doneServing: + // Serve loop is gone. // Client has closed their connection to the server. + return errClientDisconnected } } @@ -755,7 +852,6 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { if sc.writingFrame { panic("internal error: can only be writing one frame at a time") } - sc.writingFrame = true st := wm.stream if st != nil { @@ -764,16 +860,53 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { panic("internal error: attempt to send frame on half-closed-local stream") case stateClosed: if st.sentReset || st.gotReset { - // Skip this frame. But fake the frame write to reschedule: - sc.wroteFrameCh <- struct{}{} + // Skip this frame. + sc.scheduleFrameWrite() return } panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) } } + sc.writingFrame = true sc.needsFrameFlush = true - if endsStream(wm.write) { + go sc.writeFrameAsync(wm) +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + + wm := res.wm + st := wm.stream + + closeStream := endsStream(wm.write) + + if _, ok := wm.write.(handlerPanicRST); ok { + sc.closeStream(st, errHandlerPanicked) + } + + // Reply (if requested) to the blocked ServeHTTP goroutine. + if ch := wm.done; ch != nil { + select { + case ch <- res.err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) + } + } + wm.write = nil // prevent use (assume it's tainted after wm.done send) + + if closeStream { if st == nil { panic("internal error: expecting non-nil stream") } @@ -791,10 +924,11 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { errCancel := StreamError{st.id, ErrCodeCancel} sc.resetStream(errCancel) case stateHalfClosedRemote: - sc.closeStream(st, nil) + sc.closeStream(st, errHandlerComplete) } } - go sc.writeFrameAsync(wm) + + sc.scheduleFrameWrite() } // scheduleFrameWrite tickles the frame writing scheduler. @@ -874,32 +1008,18 @@ func (sc *serverConn) resetStream(se StreamError) { } } -// curHeaderStreamID returns the stream ID of the header block we're -// currently in the middle of reading. If this returns non-zero, the -// next frame must be a CONTINUATION with this stream id. 
-func (sc *serverConn) curHeaderStreamID() uint32 { - sc.serveG.check() - st := sc.req.stream - if st == nil { - return 0 - } - return st.id -} - // processFrameFromReader processes the serve loop's read from readFrameCh from the // frame-reading goroutine. // processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool { +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.serveG.check() - var clientGone bool - var err error - if !fgValid { - err = <-sc.readFrameErrCh + err := res.err + if err != nil { if err == ErrFrameTooLarge { sc.goAway(ErrCodeFrameSize) return true // goAway will close the loop } - clientGone = err == io.EOF || strings.Contains(err.Error(), "use of closed network connection") + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) if clientGone { // TODO: could we also get into this state if // the peer does a half close @@ -911,13 +1031,12 @@ func (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool // just for testing we could have a non-TLS mode. return false } - } - - if fgValid { - f := fg.f - sc.vlogf("got %v: %#v", f.Header(), f) + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } err = sc.processFrame(f) - fg.g.Done() // unblock the readFrames goroutine if err == nil { return true } @@ -931,17 +1050,17 @@ func (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool sc.goAway(ErrCodeFlowControl) return true case ConnectionError: - sc.logf("%v: %v", sc.conn.RemoteAddr(), ev) + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown default: - if !fgValid { - sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) } else { - sc.logf("disconnection due to other error: %v", err) + sc.logf("http2: server closing client connection: %v", err) } + return false } - return false } func (sc *serverConn) processFrame(f Frame) error { @@ -955,21 +1074,11 @@ func (sc *serverConn) processFrame(f Frame) error { sc.sawFirstSettings = true } - if s := sc.curHeaderStreamID(); s != 0 { - if cf, ok := f.(*ContinuationFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } else if cf.Header().StreamID != s { - return ConnectionError(ErrCodeProtocol) - } - } - switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) - case *HeadersFrame: + case *MetaHeadersFrame: return sc.processHeaders(f) - case *ContinuationFrame: - return sc.processContinuation(f) case *WindowUpdateFrame: return sc.processWindowUpdate(f) case *PingFrame: @@ -985,14 +1094,14 @@ func (sc *serverConn) processFrame(f Frame) error { // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. return ConnectionError(ErrCodeProtocol) default: - log.Printf("Ignoring frame: %v", f.Header()) + sc.vlogf("http2: server ignoring frame: %v", f.Header()) return nil } } func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() - if f.Flags.Has(FlagSettingsAck) { + if f.IsAck() { // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." 
return nil @@ -1060,12 +1169,27 @@ func (sc *serverConn) closeStream(st *stream, err error) { } st.state = stateClosed sc.curOpenStreams-- + if sc.curOpenStreams == 0 { + sc.setConnState(http.StateIdle) + } delete(sc.streams, st.id) if p := st.body; p != nil { - p.Close(err) + p.CloseWithError(err) } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.forgetStream(st.id) + if st.reqBuf != nil { + // Stash this request body buffer (64k) away for reuse + // by a future POST/PUT/etc. + // + // TODO(bradfitz): share on the server? sync.Pool? + // Server requires locks and might hurt contention. + // sync.Pool might work, or might be worse, depending + // on goroutine CPU migrations. (get and put on + // separate CPUs). Maybe a mix of strategies. But + // this is an easy win for now. + sc.freeRequestBodyBuf = st.reqBuf + } } func (sc *serverConn) processSettings(f *SettingsFrame) error { @@ -1093,7 +1217,9 @@ func (sc *serverConn) processSetting(s Setting) error { if err := s.Valid(); err != nil { return err } - sc.vlogf("processing setting %v", s) + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } switch s.ID { case SettingHeaderTableSize: sc.headerTableSize = s.Val @@ -1107,11 +1233,14 @@ func (sc *serverConn) processSetting(s Setting) error { case SettingMaxFrameSize: sc.writeSched.maxFrameSize = s.Val case SettingMaxHeaderListSize: - sc.maxHeaderListSize = s.Val + sc.peerMaxHeaderListSize = s.Val default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } } return nil } @@ -1151,7 +1280,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // with a stream error (Section 5.4.2) of type STREAM_CLOSED." id := f.Header().StreamID st, ok := sc.streams[id] - if !ok || st.state != stateOpen { + if !ok || st.state != stateOpen || st.gotTrailerHeader { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that // the http.Handler returned, so it's done reading & @@ -1166,7 +1295,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.Close(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) return StreamError{id, ErrCodeStreamClosed} } if len(data) > 0 { @@ -1185,18 +1314,39 @@ func (sc *serverConn) processData(f *DataFrame) error { st.bodyBytes += int64(len(data)) } if f.StreamEnded() { - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.Close(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.Close(io.EOF) - } - st.state = stateHalfClosedRemote + st.endStream() } return nil } -func (sc *serverConn) processHeaders(f *HeadersFrame) error { +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). 
+func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.Header().StreamID if sc.inGoAway { @@ -1204,20 +1354,34 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { return nil } // http://http2.github.io/http2-spec/#rfc.section.5.1.1 - if id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil { - // Streams initiated by a client MUST use odd-numbered - // stream identifiers. [...] The identifier of a newly - // established stream MUST be numerically greater than all - // streams that the initiating endpoint has opened or - // reserved. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { return ConnectionError(ErrCodeProtocol) } - if id > sc.maxStreamID { - sc.maxStreamID = id + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + st := sc.streams[f.Header().StreamID] + if st != nil { + return st.processTrailerHeaders(f) } - st := &stream{ + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. 
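
With endStream and copyTrailersToHandlerRequest above, request trailers that the client pre-declared are copied into the handler's Request.Trailer map once the body reaches EOF. A minimal, illustrative handler; the handler name, route, and certificate paths are placeholders, not part of the vendored change.

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

// trailerEcho drains the body first: until EOF, r.Trailer holds only the
// declared keys with nil values; the values arrive once the body is consumed.
func trailerEcho(w http.ResponseWriter, r *http.Request) {
	if _, err := io.Copy(ioutil.Discard, r.Body); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	log.Printf("request trailers: %v", r.Trailer)
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/upload", trailerEcho)
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}
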
+ if id <= sc.maxStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxStreamID = id + + st = &stream{ + sc: sc, id: id, state: stateOpen, } @@ -1236,36 +1400,9 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { adjustStreamPriority(sc.streams, st.id, f.Priority) } sc.curOpenStreams++ - sc.req = requestParam{ - stream: st, - header: make(http.Header), + if sc.curOpenStreams == 1 { + sc.setConnState(http.StateActive) } - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processContinuation(f *ContinuationFrame) error { - sc.serveG.check() - st := sc.streams[f.Header().StreamID] - if st == nil || sc.curHeaderStreamID() != st.id { - return ConnectionError(ErrCodeProtocol) - } - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { - sc.serveG.check() - if _, err := sc.hpackDecoder.Write(frag); err != nil { - // TODO: convert to stream error I assume? - return err - } - if !end { - return nil - } - if err := sc.hpackDecoder.Close(); err != nil { - // TODO: convert to stream error I assume? - return err - } - defer sc.resetPendingRequest() if sc.curOpenStreams > sc.advMaxStreams { // "Endpoints MUST NOT exceed the limit set by their // peer. An endpoint that receives a HEADERS frame @@ -1285,13 +1422,48 @@ func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bo return StreamError{st.id, ErrCodeRefusedStream} } - rw, req, err := sc.newWriterAndRequest() + rw, req, err := sc.newWriterAndRequest(st, f) if err != nil { return err } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } st.body = req.Body.(*requestBody).pipe // may be nil st.declBodyBytes = req.ContentLength - go sc.runHandler(rw, req) + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return StreamError{st.id, ErrCodeProtocol} + } + + if len(f.PseudoFields()) > 0 { + return StreamError{st.id, ErrCodeProtocol} + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() return nil } @@ -1336,19 +1508,21 @@ func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority } } -// resetPendingRequest zeros out all state related to a HEADERS frame -// and its zero or more CONTINUATION frames sent to start a new -// request. 
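
The setConnState(http.StateActive) call above, paired with the StateIdle transition in closeStream, means an http.Server's ConnState hook now tracks HTTP/2 connections as streams open and close. A small sketch of a server observing this; the address and certificate paths are placeholders.

package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello over " + r.Proto + "\n"))
		}),
		// HTTP/2 conns now report StateActive while at least one stream is
		// open and StateIdle when the open-stream count drops back to zero.
		ConnState: func(c net.Conn, s http.ConnState) {
			log.Printf("conn %v -> %v", c.RemoteAddr(), s)
		},
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
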
-func (sc *serverConn) resetPendingRequest() { +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - sc.req = requestParam{} -} -func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) { - sc.serveG.check() - rp := &sc.req - if rp.invalidHeader || rp.method == "" || rp.path == "" || - (rp.scheme != "https" && rp.scheme != "http") { + method := f.PseudoValue("method") + path := f.PseudoValue("path") + scheme := f.PseudoValue("scheme") + authority := f.PseudoValue("authority") + + isConnect := method == "CONNECT" + if isConnect { + if path != "" || scheme != "" || authority == "" { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + } else if method == "" || path == "" || + (scheme != "https" && scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -1359,52 +1533,99 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + + bodyOpen := !f.StreamEnded() + if method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + + if scheme == "https" { tlsState = sc.tlsState } - authority := rp.authority + + header := make(http.Header) + for _, hf := range f.RegularFields() { + header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if authority == "" { - authority = rp.header.Get("Host") + authority = header.Get("Host") } - needsContinue := rp.header.Get("Expect") == "100-continue" + needsContinue := header.Get("Expect") == "100-continue" if needsContinue { - rp.header.Del("Expect") + header.Del("Expect") } - bodyOpen := rp.stream.state == stateOpen + // Merge Cookie headers into one "; "-delimited value. + if cookies := header["Cookie"]; len(cookies) > 1 { + header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(header, "Trailer") + body := &requestBody{ conn: sc, - stream: rp.stream, + stream: st, needsContinue: needsContinue, } - // TODO: handle asterisk '*' requests + test - url, err := url.ParseRequestURI(rp.path) - if err != nil { - // TODO: find the right error code? 
- return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + var url_ *url.URL + var requestURI string + if isConnect { + url_ = &url.URL{Host: authority} + requestURI = authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(path) + if err != nil { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + requestURI = path } req := &http.Request{ - Method: rp.method, - URL: url, + Method: method, + URL: url_, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: rp.path, + Header: header, + RequestURI: requestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, Host: authority, Body: body, + Trailer: trailer, } if bodyOpen { - body.pipe = &pipe{ - b: buffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX - } - body.pipe.c.L = &body.pipe.m + // Disabled, per golang.org/issue/14960: + // st.reqBuf = sc.getRequestBodyBuf() + // TODO: remove this 64k of garbage per request (again, but without a data race): + buf := make([]byte, initialWindowSize) - if vv, ok := rp.header["Content-Length"]; ok { + body.pipe = &pipe{ + b: &fixedBuffer{buf: buf}, + } + + if vv, ok := header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 @@ -1417,25 +1638,59 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err rws.conn = sc rws.bw = bwSave rws.bw.Reset(chunkWriter{rws}) - rws.stream = rp.stream + rws.stream = st rws.req = req rws.body = body - rws.frameWriteCh = make(chan error, 1) rw := &responseWriter{rws: rws} return rw, req, nil } +func (sc *serverConn) getRequestBodyBuf() []byte { + sc.serveG.check() + if buf := sc.freeRequestBodyBuf; buf != nil { + sc.freeRequestBodyBuf = nil + return buf + } + return make([]byte, initialWindowSize) +} + // Run on its own goroutine. -func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request) { - defer rw.handlerDone() - // TODO: catch panics like net/http.Server - sc.handler.ServeHTTP(rw, req) +func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + if didPanic { + e := recover() + // Same as net/http: + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.writeFrameFromHandler(frameWriteMsg{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") } // called from handler goroutines. // h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders, tempCh chan error) { +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { sc.serveG.checkNotOn() // NOT on var errc chan error if headerData.h != nil { @@ -1443,22 +1698,27 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders, temp // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = tempCh + errc = errChanPool.Get().(chan error) } - sc.writeFrameFromHandler(frameWriteMsg{ + if err := sc.writeFrameFromHandler(frameWriteMsg{ write: headerData, stream: st, done: errc, - }) + }); err != nil { + return err + } if errc != nil { select { - case <-errc: - // Ignore. Just for synchronization. - // Any error will be handled in the writing goroutine. + case err := <-errc: + errChanPool.Put(errc) + return err case <-sc.doneServing: - // Client has closed the connection. + return errClientDisconnected + case <-st.cw: + return errStreamClosed } } + return nil } // called from handler goroutines. @@ -1481,7 +1741,10 @@ type bodyReadMsg struct { // and schedules flow control tokens to be sent. func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { sc.serveG.checkNotOn() // NOT on - sc.bodyReadCh <- bodyReadMsg{st, n} + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } } func (sc *serverConn) noteBodyRead(st *stream, n int) { @@ -1548,7 +1811,7 @@ type requestBody struct { func (b *requestBody) Close() error { if b.pipe != nil { - b.pipe.Close(errClosedBody) + b.pipe.CloseWithError(errClosedBody) } b.closed = true return nil @@ -1599,12 +1862,14 @@ type responseWriterState struct { // mutated by http.Handler goroutine: handlerHeader http.Header // nil until called snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk status int // status code passed to WriteHeader wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - curWrite writeData - frameWriteCh chan error // re-used whenever we need to block on a frame being written + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 closeNotifierMu sync.Mutex // guards closeNotifierCh closeNotifierCh chan bool // nil until first used @@ -1614,6 +1879,23 @@ type chunkWriter struct{ rws *responseWriterState } func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Content-Length", "Trailer": + // Forbidden by RFC 2616 14.40. + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + // writeChunk writes chunks from the bufio.Writer. But because // bufio.Writer may bypass its chunking, sometimes p may be // arbitrarily large. 
@@ -1624,41 +1906,137 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if !rws.wroteHeader { rws.writeHeader(200) } + + isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true - var ctype, clen string // implicit ones, if we can calculate it - if rws.handlerDone && rws.snapHeader.Get("Content-Length") == "" { + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } - if rws.snapHeader.Get("Content-Type") == "" { + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) { ctype = http.DetectContentType(p) } - endStream := rws.handlerDone && len(p) == 0 - rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: rws.status, h: rws.snapHeader, endStream: endStream, contentType: ctype, contentLength: clen, - }, rws.frameWriteCh) + date: date, + }) + if err != nil { + return 0, err + } if endStream { return 0, nil } } + if isHeadResp { + return len(p), nil + } if len(p) == 0 && !rws.handlerDone { return 0, nil } - curWrite := &rws.curWrite - curWrite.streamID = rws.stream.id - curWrite.p = p - curWrite.endStream = rws.handlerDone - if err := rws.conn.writeDataFromHandler(rws.stream, curWrite, rws.frameWriteCh); err != nil { - return 0, err + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + return len(p), err } return len(p), nil } +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. 
Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + func (w *responseWriter) Flush() { rws := w.rws if rws == nil { @@ -1761,6 +2139,15 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, if !rws.wroteHeader { w.WriteHeader(200) } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + if dataB != nil { return rws.bw.Write(dataB) } else { @@ -1770,11 +2157,26 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - if rws == nil { - panic("handlerDone called twice") - } rws.handlerDone = true w.Flush() w.rws = nil responseWriterStatePool.Put(rws) } + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} diff --git a/vendor/src/golang.org/x/net/http2/transport.go b/vendor/src/golang.org/x/net/http2/transport.go index 73f358eefe..b5907dae65 100644 --- a/vendor/src/golang.org/x/net/http2/transport.go +++ b/vendor/src/golang.org/x/net/http2/transport.go @@ -1,55 +1,156 @@ -// Copyright 2015 The Go Authors. -// See https://go.googlesource.com/go/+/master/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://go.googlesource.com/go/+/master/LICENSE +// Copyright 2015 The Go Authors. All rights reserved. 
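
Between declareTrailer, the trailer HEADERS write in writeChunk, and the TrailerPrefix escape hatch, a handler now has two ways to emit HTTP/2 response trailers. A hedged sketch of both; handler names, routes, header names, and certificate paths are placeholders.

package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

// preDeclared uses the documented net/http mechanism: name the trailer in the
// "Trailer" header before the first write, then set its value afterwards.
func preDeclared(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Trailer", "Grpc-Status")
	io.WriteString(w, "body\n")
	w.Header().Set("Grpc-Status", "0") // promoted to a trailer at end of stream
}

// undeclared uses the new TrailerPrefix hack for trailer names not known until
// mid-stream: keys beginning with "Trailer:" are stripped of the prefix and
// sent as trailers after the handler returns.
func undeclared(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "body\n")
	w.Header().Set(http2.TrailerPrefix+"X-Checksum", "deadbeef")
}

func main() {
	http.HandleFunc("/pre", preDeclared)
	http.HandleFunc("/post", undeclared)
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}
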
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. package http2 import ( "bufio" "bytes" + "compress/gzip" "crypto/tls" "errors" "fmt" "io" + "io/ioutil" "log" "net" "net/http" + "sort" "strconv" "strings" "sync" + "time" "golang.org/x/net/http2/hpack" ) +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. type Transport struct { - Fallback http.RoundTripper + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - // TODO: remove this and make more general with a TLS dial hook, like http - InsecureTLSDial bool + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config - connMu sync.Mutex - conns map[string][]*clientConn // key is host:port + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allow. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). 
+ t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool } -type clientConn struct { - t *Transport - tconn *tls.Conn - tlsState *tls.ConnectionState - connKey []string // key(s) this connection is cached in, in t.conns +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + + // readLoop goroutine fields: readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed - hdec *hpack.Decoder - nextRes *http.Response - mu sync.Mutex + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control closed bool - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - streams map[uint32]*clientStream + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + streams map[uint32]*clientStream // client-initiated nextStreamID uint32 bw *bufio.Writer - werr error // first write error that has occurred br *bufio.Reader fr *Framer // Settings from peer: @@ -58,13 +159,78 @@ type clientConn struct { initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred } +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. 
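
For the client side, the reworked Transport can either be used directly or attached to an existing net/http Transport via ConfigureTransport (Go 1.6+). A minimal usage sketch, not part of the vendored change; the URL is a placeholder.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Option 1: upgrade an existing net/http Transport in place, so https://
	// requests negotiate HTTP/2 via ALPN whenever the server offers it.
	t1 := &http.Transport{}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}

	// Option 2: use the HTTP/2 transport directly; it only accepts https://.
	t2 := &http2.Transport{
		DisableCompression: false, // transport requests and decodes gzip itself
	}

	for _, rt := range []http.RoundTripper{t1, t2} {
		res, err := (&http.Client{Transport: rt}).Get("https://example.com/")
		if err != nil {
			log.Fatal(err)
		}
		res.Body.Close()
		log.Println(res.Proto, res.Status) // "HTTP/2.0 ..." when negotiated
	}
}
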
type clientStream struct { - ID uint32 - resc chan resAndError - pw *io.PipeWriter - pr *io.PipeReader + cc *ClientConn + req *http.Request + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + requestedGzip bool + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel runs in its own goroutine and waits for the user +// to either cancel a RoundTrip request (using the provided +// Request.Cancel channel), or for the request to be done (any way it +// might be removed from the cc.streams map: peer reset, successful +// completion, TCP connection breakage, etc) +func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) { + if cancel == nil { + return + } + select { + case <-cancel: + cs.bufPipe.CloseWithError(errRequestCanceled) + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-cs.done: + } +} + +// checkReset reports any error sent in a RST_STREAM frame by the +// server. +func (cs *clientStream) checkReset() error { + select { + case <-cs.peerReset: + return cs.resetErr + default: + return nil + } +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() } type stickyErrWriter struct { @@ -81,30 +247,49 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { return } +var ErrNoCachedConn = errors.New("http2: no cached connection was available") + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(authority string) (addr string) { + if _, _, err := net.SplitHostPort(authority); err == nil { + return authority + } + return net.JoinHostPort(authority, "443") +} + +// RoundTripOpt is like RoundTrip, but takes options. 
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if req.URL.Scheme != "https" { - if t.Fallback == nil { - return nil, errors.New("http2: unsupported scheme and no Fallback") - } - return t.Fallback.RoundTrip(req) - } - - host, port, err := net.SplitHostPort(req.URL.Host) - if err != nil { - host = req.URL.Host - port = "443" + return nil, errors.New("http2: unsupported scheme") } + addr := authorityAddr(req.URL.Host) for { - cc, err := t.getClientConn(host, port) + cc, err := t.connPool().GetClientConn(req, addr) if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - res, err := cc.roundTrip(req) - if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)? + res, err := cc.RoundTrip(req) + if shouldRetryRequest(req, err) { continue } if err != nil { + t.vlogf("RoundTrip failure: %v", err) return nil, err } return res, nil @@ -115,106 +300,96 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { // connected from previous requests but are now sitting idle. // It does not interrupt any connections currently in use. func (t *Transport) CloseIdleConnections() { - t.connMu.Lock() - defer t.connMu.Unlock() - for _, vv := range t.conns { - for _, cc := range vv { - cc.closeIfIdle() - } + if cp, ok := t.connPool().(*clientConnPool); ok { + cp.closeIdleConnections() } } -var errClientConnClosed = errors.New("http2: client conn is closed") +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") +) -func shouldRetryRequest(err error) bool { - // TODO: or GOAWAY graceful shutdown stuff - return err == errClientConnClosed +func shouldRetryRequest(req *http.Request, err error) bool { + // TODO: retry GET requests (no bodies) more aggressively, if shutdown + // before response. 
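
RoundTripOpt with OnlyCachedConn, together with ErrNoCachedConn, lets a caller probe for an existing connection without dialing; authorityAddr supplies the implied :443 when the URL has no port. A small sketch using only the exported API; the helper name is hypothetical.

package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

// tryCached succeeds only when the Transport already holds a usable HTTP/2
// connection for the request's authority; otherwise it reports ErrNoCachedConn
// and the caller can decide whether to dial normally.
func tryCached(t *http2.Transport, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	res, err := t.RoundTripOpt(req, http2.RoundTripOpt{OnlyCachedConn: true})
	if err == http2.ErrNoCachedConn {
		return nil, err // nothing cached for this host:port
	}
	return res, err
}

func main() {
	// With a fresh Transport nothing is cached, so this reports ErrNoCachedConn.
	_, _ = tryCached(&http2.Transport{}, "https://example.com/")
}
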
+ return err == errClientConnUnusable } -func (t *Transport) removeClientConn(cc *clientConn) { - t.connMu.Lock() - defer t.connMu.Unlock() - for _, key := range cc.connKey { - vv, ok := t.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - t.conns[key] = newList - } else { - delete(t.conns, key) - } - } -} - -func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - return out -} - -func (t *Transport) getClientConn(host, port string) (*clientConn, error) { - t.connMu.Lock() - defer t.connMu.Unlock() - - key := net.JoinHostPort(host, port) - - for _, cc := range t.conns[key] { - if cc.canTakeNewRequest() { - return cc, nil - } - } - if t.conns == nil { - t.conns = make(map[string][]*clientConn) - } - cc, err := t.newClientConn(host, port, key) +func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err } - t.conns[key] = append(t.conns[key], cc) - return cc, nil -} - -func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) { - cfg := &tls.Config{ - ServerName: host, - NextProtos: []string{NextProtoTLS}, - InsecureSkipVerify: t.InsecureTLSDial, - } - tconn, err := tls.Dial("tcp", net.JoinHostPort(host, port), cfg) + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } - if err := tconn.Handshake(); err != nil { + return t.NewClientConn(tconn) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { return nil, err } - if !t.InsecureTLSDial { - if err := tconn.VerifyHostname(cfg.ServerName); err != nil { + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { return nil, err } } - state := tconn.ConnectionState() + state := cn.ConnectionState() if p := state.NegotiatedProtocol; p != NextProtoTLS { - // TODO(bradfitz): fall back to Fallback - return nil, fmt.Errorf("bad protocol: %v", p) + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) } if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("could not negotiate protocol mutually") + return nil, errors.New("http2: could not negotiate protocol mutually") } - if _, err := tconn.Write(clientPreface); err != nil { + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. 
+func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + if VerboseLogs { + t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) + } + if _, err := c.Write(clientPreface); err != nil { + t.vlogf("client preface write error: %v", err) return nil, err } - cc := &clientConn{ + cc := &ClientConn{ t: t, - tconn: tconn, - connKey: []string{key}, // TODO: cert's validated hostnames too - tlsState: &state, + tconn: c, readerDone: make(chan struct{}), nextStreamID: 1, maxFrameSize: 16 << 10, // spec default @@ -222,14 +397,36 @@ func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) { maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. streams: make(map[uint32]*clientStream), } - cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr}) - cc.br = bufio.NewReader(tconn) + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.fr.WriteSettings() - // TODO: re-send more conn-level flow control tokens when server uses all these. - cc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs? + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() if cc.werr != nil { return nil, cc.werr @@ -256,33 +453,35 @@ func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) { case SettingInitialWindowSize: cc.initialWindowSize = s.Val default: - // TODO(bradfitz): handle more - log.Printf("Unhandled Setting: %v", s) + // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? 
+ t.vlogf("Unhandled Setting: %v", s) } return nil }) - // TODO: figure out henc size - cc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField) go cc.readLoop() return cc, nil } -func (cc *clientConn) setGoAway(f *GoAwayFrame) { +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() cc.goAway = f } -func (cc *clientConn) canTakeNewRequest() bool { +func (cc *ClientConn) CanTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() - return cc.goAway == nil && + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && cc.nextStreamID < 2147483647 } -func (cc *clientConn) closeIfIdle() { +func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 { cc.mu.Unlock() @@ -295,96 +494,533 @@ func (cc *clientConn) closeIfIdle() { cc.tconn.Close() } -func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) { - cc.mu.Lock() +const maxAllocFrameSize = 512 << 10 - if cc.closed { +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + // TODO: could do better allocation-wise here, but trailers are rare, + // so being lazy for now. + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. 
+func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return errors.New("http2: invalid Upgrade request header") + } + if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { + return errors.New("http2: invalid Transfer-Encoding request header") + } + if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { + return errors.New("http2: invalid Connection request header") + } + return nil +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, err + } + hasTrailers := trailers != "" + + var body io.Reader = req.Body + contentLen := req.ContentLength + if req.Body != nil && contentLen == 0 { + // Test to see if it's actually zero or just unset. + var buf [1]byte + n, rerr := io.ReadFull(body, buf[:]) + if rerr != nil && rerr != io.EOF { + contentLen = -1 + body = errorReader{rerr} + } else if n == 1 { + // Oh, guess there is data in this Body Reader after all. + // The ContentLength field just wasn't set. + // Stich the Body back together again, re-attaching our + // consumed byte. + contentLen = -1 + body = io.MultiReader(bytes.NewReader(buf[:]), body) + } else { + // Body is actually empty. + body = nil + } + } + + cc.mu.Lock() + if cc.closed || !cc.canTakeNewRequestLocked() { cc.mu.Unlock() - return nil, errClientConnClosed + return nil, errClientConnUnusable } cs := cc.newStream() - hasBody := false // TODO + cs.req = req + hasBody := body != nil - // we send: HEADERS[+CONTINUATION] + (DATA?) - hdrs := cc.encodeHeaders(req) - first := true - for len(hdrs) > 0 { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. 
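
checkConnHeaders above rejects connection-specific header fields before the request is written to the wire (HTTP/2 has no Connection header; "close" and "keep-alive" are tolerated here and dropped later in encodeHeaders). A hedged sketch of the observable behavior; the URL is a placeholder.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Connection", "upgrade") // anything but close/keep-alive
	if _, err := t.RoundTrip(req); err != nil {
		log.Println(err) // e.g. "http2: invalid Connection request header"
	}
}
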
See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, hdrs) + cc.wmu.Unlock() + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + return nil, werr + } + + var respHeaderTimer <-chan time.Time + var bodyCopyErrc chan error // result of body copy + if hasBody { + bodyCopyErrc = make(chan error, 1) + go func() { + bodyCopyErrc <- cs.writeRequestBody(body, req.Body) + }() + } else { + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + requestCanceledCh := requestCancel(req) + bodyWritten := false + + for { + select { + case re := <-readLoopResCh: + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + case <-respHeaderTimer: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errTimeout + case <-requestCanceledCh: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. 
+ return nil, cs.resetErr + case err := <-bodyCopyErrc: + if err != nil { + return nil, err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + frameSize := int(cc.maxFrameSize) + for len(hdrs) > 0 && cc.werr == nil { chunk := hdrs - if len(chunk) > int(cc.maxFrameSize) { - chunk = chunk[:cc.maxFrameSize] + if len(chunk) > frameSize { + chunk = chunk[:frameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 if first { cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: cs.ID, + StreamID: streamID, BlockFragment: chunk, - EndStream: !hasBody, + EndStream: endStream, EndHeaders: endHeaders, }) first = false } else { - cc.fr.WriteContinuation(cs.ID, endHeaders, chunk) + cc.fr.WriteContinuation(streamID, endHeaders, chunk) } } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. cc.bw.Flush() - werr := cc.werr - cc.mu.Unlock() - - if hasBody { - // TODO: write data. and it should probably be interleaved: - // go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... etc - } - - if werr != nil { - return nil, werr - } - - re := <-cs.resc - if re.err != nil { - return nil, re.err - } - res := re.res - res.Request = req - res.TLS = cc.tlsState - return res, nil + return cc.werr } +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or opt-out? + // Use some heuristic on the body type? Nagel-like timers? + // Based on 'n'? 
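
The select above ties Request.Cancel into the stream lifecycle: closing the channel resets the stream (or aborts the body write) and RoundTrip returns. A sketch of what that looks like from the caller's side; the host and timing are placeholders.

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	client := &http.Client{Transport: &http2.Transport{}}

	req, err := http.NewRequest("GET", "https://example.com/slow", nil)
	if err != nil {
		log.Fatal(err)
	}
	cancel := make(chan struct{})
	req.Cancel = cancel

	go func() {
		time.Sleep(2 * time.Second)
		close(cancel) // transport resets the stream and RoundTrip returns
	}()

	if _, err := client.Do(req); err != nil {
		log.Println("request ended:", err)
	}
}
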
Only last chunk of this for loop, unless flow control + // tokens are low? For now, always: + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + cc.wmu.Lock() + if !sentEnd { + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls = cc.encodeTrailers(req) + cc.mu.Unlock() + } + + // Avoid forgetting to send an END_STREAM if the encoded + // trailers are 0 bytes. Both results produce and END_STREAM. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + cc.wmu.Unlock() + + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkReset(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + // requires cc.mu be held. -func (cc *clientConn) encodeHeaders(req *http.Request) []byte { +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte { cc.hbuf.Reset() - // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go host := req.Host if host == "" { host = req.URL.Host } - path := req.URL.Path - if path == "" { - path = "/" + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + cc.writeHeader(":authority", host) + cc.writeHeader(":method", req.Method) + if req.Method != "CONNECT" { + cc.writeHeader(":path", req.URL.RequestURI()) + cc.writeHeader(":scheme", "https") + } + if trailers != "" { + cc.writeHeader("trailer", trailers) } - cc.writeHeader(":authority", host) // probably not right for all sites - cc.writeHeader(":method", req.Method) - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", "https") - + var didUA bool for k, vv := range req.Header { lowKey := strings.ToLower(k) - if lowKey == "host" { + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. + // Content-Length is automatic, set below. continue + case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + case "user-agent": + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
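
awaitFlowControl above caps each DATA write at the stream's currently available flow-control tokens and the peer's advertised maximum frame size. A toy restatement of just that clamp, with illustrative names rather than the vendored ones.

package main

import "fmt"

// clampWrite mirrors the arithmetic in awaitFlowControl, which only runs once
// at least one token is available: never take more than was asked for, and
// never more than one frame's worth.
func clampWrite(want int, available, maxFrameSize int32) int32 {
	take := available
	if int(take) > want {
		take = int32(want)
	}
	if take > maxFrameSize {
		take = maxFrameSize
	}
	return take
}

func main() {
	fmt.Println(clampWrite(100000, 65535, 16384)) // 16384: frame size wins
	fmt.Println(clampWrite(1000, 65535, 16384))   // 1000: requested size wins
	fmt.Println(clampWrite(100000, 512, 16384))   // 512: flow control wins
}
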
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } } for _, v := range vv { cc.writeHeader(lowKey, v) } } + if shouldSendReqContentLength(req.Method, contentLength) { + cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + cc.writeHeader("accept-encoding", "gzip") + } + if !didUA { + cc.writeHeader("user-agent", defaultUserAgent) + } + return cc.hbuf.Bytes() +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { + cc.hbuf.Reset() + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filter at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } return cc.hbuf.Bytes() } -func (cc *clientConn) writeHeader(name, value string) { - log.Printf("sending %q = %q", name, value) +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) } @@ -394,160 +1030,635 @@ type resAndError struct { } // requires cc.mu be held. -func (cc *clientConn) newStream() *clientStream { +func (cc *ClientConn) newStream() *clientStream { cs := &clientStream{ - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) cc.nextStreamID += 2 cc.streams[cs.ID] = cs return cs } -func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream { +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.mu.Lock() defer cc.mu.Unlock() cs := cc.streams[id] - if andRemove { + if andRemove && cs != nil && !cc.closed { delete(cc.streams, id) + close(cs.done) } return cs } -// runs in its own goroutine. -func (cc *clientConn) readLoop() { - defer cc.t.removeClientConn(cc) +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. 
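The content-length handling in shouldSendReqContentLength earlier in this hunk mirrors net/http's transferWriter: a positive length is always sent, an unknown length (-1) never is, and a zero length is sent only for methods that usually carry a body. A minimal standalone sketch of that rule, using no unexported identifiers from the package:

```
package main

import "fmt"

// sendsContentLength restates the shouldSendReqContentLength rule from the
// hunk above (positive: yes, unknown: no, zero: only for body-bearing methods).
func sendsContentLength(method string, contentLength int64) bool {
	if contentLength > 0 {
		return true
	}
	if contentLength < 0 {
		return false // unknown length: omit the header
	}
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(sendsContentLength("GET", 0))    // false
	fmt.Println(sendsContentLength("POST", 0))   // true
	fmt.Println(sendsContentLength("PUT", -1))   // false
	fmt.Println(sendsContentLength("GET", 1024)) // true
}
```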
+func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{ + cc: cc, + activeRes: make(map[uint32]*clientStream), + } + + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) defer close(cc.readerDone) - activeRes := map[uint32]*clientStream{} // keyed by streamID // Close any response bodies if the server closes prematurely. // TODO: also do this if we've written the headers but not // gotten a response yet. - defer func() { - err := cc.readerErr - if err == io.EOF { - err = io.ErrUnexpectedEOF + err := cc.readerErr + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + cc.mu.Lock() + for _, cs := range rl.activeRes { + cs.bufPipe.CloseWithError(err) + } + for _, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: } - for _, cs := range activeRes { - cs.pw.CloseWithError(err) - } - }() - - // continueStreamID is the stream ID we're waiting for - // continuation frames for. - var continueStreamID uint32 + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() + gotReply := false // ever saw a reply for { f, err := cc.fr.ReadFrame() if err != nil { - cc.readerErr = err - return + cc.vlogf("Transport readFrame error: (%T) %v", err, err) } - log.Printf("Transport received %v: %#v", f.Header(), f) - - streamID := f.Header().StreamID - - _, isContinue := f.(*ContinuationFrame) - if isContinue { - if streamID != continueStreamID { - log.Printf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, continueStreamID) - cc.readerErr = ConnectionError(ErrCodeProtocol) - return + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + rl.endStreamError(cs, cc.fr.errDetail) } - } else if continueStreamID != 0 { - // Continue frames need to be adjacent in the stream - // and we were in the middle of headers. - log.Printf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, continueStreamID) - cc.readerErr = ConnectionError(ErrCodeProtocol) - return - } - - if streamID%2 == 0 { - // Ignore streams pushed from the server for now. - // These always have an even stream id. continue + } else if err != nil { + return err } - streamEnded := false - if ff, ok := f.(streamEnder); ok { - streamEnded = ff.StreamEnded() - } - - cs := cc.streamByID(streamID, streamEnded) - if cs == nil { - log.Printf("Received frame for untracked stream ID %d", streamID) - continue + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) } + maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { - case *HeadersFrame: - cc.nextRes = &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: make(http.Header), - } - cs.pr, cs.pw = io.Pipe() - cc.hdec.Write(f.HeaderBlockFragment()) - case *ContinuationFrame: - cc.hdec.Write(f.HeaderBlockFragment()) + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true case *DataFrame: - log.Printf("DATA: %q", f.Data()) - cs.pw.Write(f.Data()) + err = rl.processData(f) + maybeIdle = true case *GoAwayFrame: - cc.t.removeClientConn(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. 
particularly the error code - log.Printf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) default: - log.Printf("Transport: unhandled response frame type %T", f) + cc.logf("Transport: unhandled response frame type %T", f) } - headersEnded := false - if he, ok := f.(headersEnder); ok { - headersEnded = he.HeadersEnded() - if headersEnded { - continueStreamID = 0 - } else { - continueStreamID = streamID - } - } - - if streamEnded { - cs.pw.Close() - delete(activeRes, streamID) - } - if headersEnded { - if cs == nil { - panic("couldn't find stream") // TODO be graceful - } - // TODO: set the Body to one which notes the - // Close and also sends the server a - // RST_STREAM - cc.nextRes.Body = cs.pr - res := cc.nextRes - activeRes[streamID] = cs - cs.resc <- resAndError{res: res} - } - } -} - -func (cc *clientConn) onNewHeaderField(f hpack.HeaderField) { - // TODO: verifiy pseudo headers come before non-pseudo headers - // TODO: verifiy the status is set - log.Printf("Header field: %+v", f) - if f.Name == ":status" { - code, err := strconv.Atoi(f.Value) if err != nil { - panic("TODO: be graceful") + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() } - cc.nextRes.Status = f.Value + " " + http.StatusText(code) - cc.nextRes.StatusCode = code - return } - if strings.HasPrefix(f.Name, ":") { - // "Endpoints MUST NOT generate pseudo-header fields other than those defined in this document." - // TODO: treat as invalid? - return - } - cc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value) } + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. 
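Both the removed onNewHeaderField path and handleResponse below turn the HTTP/2 ":status" pseudo-header into net/http's StatusCode/Status pair the same way. A small sketch of just that mapping:

```
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// statusFromPseudoHeader builds the Status text stored on http.Response
// from the ":status" pseudo-header value, as the read loop does above.
func statusFromPseudoHeader(status string) (code int, text string, err error) {
	code, err = strconv.Atoi(status)
	if err != nil {
		return 0, "", fmt.Errorf("malformed non-numeric status pseudo header %q", status)
	}
	return code, status + " " + http.StatusText(code), nil
}

func main() {
	code, text, _ := statusFromPseudoHeader("200")
	fmt.Println(code, text) // 200 "200 OK"
	code, text, _ = statusFromPseudoHeader("404")
	fmt.Println(code, text) // 404 "404 Not Found"
}
```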
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + // Just skip 100-continue response headers for now. + // TODO: golang.org/issue/13851 for doing it properly. + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage + cs.bufPipe = pipe{b: buf} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(requestCancel(cs.req)) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. 
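handleResponse above seeds res.Trailer from the response's "Trailer" header: each announced field name becomes a canonical key with a nil value, and copyTrailers fills the values in once the trailing HEADERS frame arrives. A standalone sketch of the announcement step, using a plain comma split in place of the package's foreachHeaderElement helper:

```
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// announceTrailers mimics the "Trailer" handling in handleResponse: announced
// names become nil-valued keys that the trailing HEADERS frame populates later.
func announceTrailers(trailerHeader string) http.Header {
	t := make(http.Header)
	for _, v := range strings.Split(trailerHeader, ",") {
		v = strings.TrimSpace(v)
		if v == "" {
			continue
		}
		t[http.CanonicalHeaderKey(v)] = nil
	}
	return t
}

func main() {
	fmt.Println(announceTrailers("grpc-status, grpc-message"))
	// map[Grpc-Message:[] Grpc-Status:[]]
}
```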
+type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = transportDefaultStreamFlow - v + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + if cs.bufPipe.Err() != io.EOF { + // TODO: write test for this + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } + cs.bufPipe.BreakWithError(errClosedResponseBody) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + return nil + } + if data := f.Data(); len(data) > 0 { + if cs.bufPipe.b == nil { + // Data frame after it's already closed? + cc.logf("http2: Transport received DATA frame for closed stream; closing connection") + return ConnectionError(ErrCodeProtocol) + } + + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(len(data)) { + cs.inflow.take(int32(len(data))) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + cc.mu.Unlock() + + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. 
+ rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) + delete(rl.activeRes, cs.ID) + if cs.req.Close || cs.req.Header.Get("Connection") == "close" { + rl.closeWhenIdle = true + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + // TODO: error if this is too large. + + // TODO: adjust flow control of still-open + // frames by the difference of the old initial + // window size and this one. + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := StreamError{cs.ID, f.ErrCode} + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl + } + delete(rl.activeRes, cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: do something with err? 
send it as a debug frame to the peer? + // But that's only in GOAWAY. Invent a new frame type? Is there one already? + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } diff --git a/vendor/src/golang.org/x/net/http2/write.go b/vendor/src/golang.org/x/net/http2/write.go index 02f0743de6..0143b24cd3 100644 --- a/vendor/src/golang.org/x/net/http2/write.go +++ b/vendor/src/golang.org/x/net/http2/write.go @@ -1,15 +1,13 @@ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE package http2 import ( "bytes" "fmt" + "log" "net/http" "time" @@ -26,7 +24,11 @@ type writeFramer interface { // frame writing scheduler (see writeScheduler in writesched.go). // // This interface is implemented by *serverConn. -// TODO: use it from the client code too, once it exists. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. type writeContext interface { Framer() *Framer Flush() error @@ -44,6 +46,11 @@ func endsStream(w writeFramer) bool { return v.endStream case *writeResHeaders: return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. 
+ panic("endsStream called on nil writeFramer") } return false } @@ -89,6 +96,16 @@ func (w *writeData) writeFrame(ctx writeContext) error { return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) } +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + func (se StreamError) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) } @@ -106,40 +123,48 @@ func (writeSettingsAck) writeFrame(ctx writeContext) error { } // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers from a server handler. +// for HTTP response headers or trailers from a server handler. type writeResHeaders struct { streamID uint32 - httpResCode int + httpResCode int // 0 means no ":status" line h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. endStream bool + date string contentType string contentLength string } +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + func (w *writeResHeaders) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)}) - for k, vv := range w.h { - k = lowerHeader(k) - for _, v := range vv { - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if k == "transfer-encoding" && v != "trailers" { - continue - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) - } + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) } + + encodeHeaders(enc, w.h, w.trailers) + if w.contentType != "" { - enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType}) + encKV(enc, "content-type", w.contentType) } if w.contentLength != "" { - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength}) + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) } headerBlock := buf.Bytes() - if len(headerBlock) == 0 { + if len(headerBlock) == 0 && w.trailers == nil { panic("unexpected empty hpack") } @@ -185,7 +210,7 @@ type write100ContinueHeadersFrame struct { func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + encKV(enc, ":status", "100") return ctx.Framer().WriteHeaders(HeadersFrameParam{ StreamID: w.streamID, BlockFragment: buf.Bytes(), @@ -202,3 +227,36 @@ type writeWindowUpdate struct { func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) } + +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validHeaderFieldName(k) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. 
+ continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !validHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/src/golang.org/x/net/http2/writesched.go b/vendor/src/golang.org/x/net/http2/writesched.go index 0e1b7486fb..c24316ce7b 100644 --- a/vendor/src/golang.org/x/net/http2/writesched.go +++ b/vendor/src/golang.org/x/net/http2/writesched.go @@ -1,9 +1,6 @@ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE package http2 diff --git a/vendor/src/golang.org/x/net/trace/trace.go b/vendor/src/golang.org/x/net/trace/trace.go index 0a232a1016..753e2214fe 100644 --- a/vendor/src/golang.org/x/net/trace/trace.go +++ b/vendor/src/golang.org/x/net/trace/trace.go @@ -95,11 +95,14 @@ var DebugUseAfterFinish = false // // The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. host, _, err := net.SplitHostPort(req.RemoteAddr) - switch { - case err != nil: // Badly formed address; fail closed. - return false, false - case host == "localhost" || host == "127.0.0.1" || host == "::1": + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": return true, true default: return false, false @@ -113,6 +116,7 @@ func init() { http.Error(w, "not allowed", http.StatusUnauthorized) return } + w.Header().Set("Content-Type", "text/html; charset=utf-8") Render(w, req, sensitive) }) http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { @@ -121,6 +125,7 @@ func init() { http.Error(w, "not allowed", http.StatusUnauthorized) return } + w.Header().Set("Content-Type", "text/html; charset=utf-8") RenderEvents(w, req, sensitive) }) } @@ -172,7 +177,7 @@ func Render(w io.Writer, req *http.Request, sensitive bool) { completedMu.RLock() data.Families = make([]string, 0, len(completedTraces)) - for fam, _ := range completedTraces { + for fam := range completedTraces { data.Families = append(data.Families, fam) } completedMu.RUnlock() diff --git a/vendor/src/golang.org/x/net/websocket/websocket.go b/vendor/src/golang.org/x/net/websocket/websocket.go index 6068400980..e069b33008 100644 --- a/vendor/src/golang.org/x/net/websocket/websocket.go +++ b/vendor/src/golang.org/x/net/websocket/websocket.go @@ -144,6 +144,8 @@ type frameHandler interface { } // Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. 
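The updated default AuthRequest in trace.go now tolerates a RemoteAddr with no port and reduces the decision to a host switch. A small sketch of the same check, with the named bool results renamed for readability:

```
package main

import (
	"fmt"
	"net"
)

// isLocalRequest restates the default x/net/trace AuthRequest shown above:
// split an optional ":port" off RemoteAddr, then allow only loopback hosts.
func isLocalRequest(remoteAddr string) (allowed, sensitive bool) {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		// RemoteAddr had no port; use it as-is.
		host = remoteAddr
	}
	switch host {
	case "localhost", "127.0.0.1", "::1":
		return true, true
	default:
		return false, false
	}
}

func main() {
	fmt.Println(isLocalRequest("127.0.0.1:54321")) // true true
	fmt.Println(isLocalRequest("::1"))             // true true
	fmt.Println(isLocalRequest("10.1.2.3:80"))     // false false
}
```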
type Conn struct { config *Config request *http.Request diff --git a/vendor/src/google.golang.org/grpc/.travis.yml b/vendor/src/google.golang.org/grpc/.travis.yml index 3f83776ec5..9bc2c12793 100644 --- a/vendor/src/google.golang.org/grpc/.travis.yml +++ b/vendor/src/google.golang.org/grpc/.travis.yml @@ -1,5 +1,9 @@ language: go +go: + - 1.5.3 + - 1.6 + before_install: - go get github.com/axw/gocov/gocov - go get github.com/mattn/goveralls @@ -11,4 +15,3 @@ install: script: - make test testrace - - make coverage diff --git a/vendor/src/google.golang.org/grpc/CONTRIBUTING.md b/vendor/src/google.golang.org/grpc/CONTRIBUTING.md index 407d384a7c..36cd6f7581 100644 --- a/vendor/src/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/src/google.golang.org/grpc/CONTRIBUTING.md @@ -1,16 +1,39 @@ # How to contribute -We definitely welcome patches and contribution to grpc! Here is some guideline +We definitely welcome patches and contribution to grpc! Here are some guidelines and information about how to do so. -## Getting started +## Sending patches -### Legal requirements +### Getting started + +1. Check out the code: + + $ go get google.golang.org/grpc + $ cd $GOPATH/src/google.golang.org/grpc + +1. Create a fork of the grpc-go repository. +1. Add your fork as a remote: + + $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git + +1. Make changes, commit them. +1. Run the test suite: + + $ make test + +1. Push your changes to your fork: + + $ git push fork ... + +1. Open a pull request. + +## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). -### Filing Issues +## Filing Issues When filing an issue, make sure to answer these five questions: 1. What version of Go are you using (`go version`)? diff --git a/vendor/src/google.golang.org/grpc/Makefile b/vendor/src/google.golang.org/grpc/Makefile index 12e84e4e5b..d26eb9081f 100644 --- a/vendor/src/google.golang.org/grpc/Makefile +++ b/vendor/src/google.golang.org/grpc/Makefile @@ -1,15 +1,3 @@ -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - all: test testrace deps: @@ -32,7 +20,7 @@ proto: echo "error: protoc not installed" >&2; \ exit 1; \ fi - go get -v github.com/golang/protobuf/protoc-gen-go + go get -u -v github.com/golang/protobuf/protoc-gen-go for file in $$(git ls-files '*.proto'); do \ protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ done @@ -44,7 +32,20 @@ testrace: testdeps go test -v -race -cpu 1,4 google.golang.org/grpc/... clean: - go clean google.golang.org/grpc/... + go clean -i google.golang.org/grpc/... coverage: testdeps ./coverage.sh --coveralls + +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + coverage diff --git a/vendor/src/google.golang.org/grpc/README.md b/vendor/src/google.golang.org/grpc/README.md index 37b05f0953..90e9453d52 100644 --- a/vendor/src/google.golang.org/grpc/README.md +++ b/vendor/src/google.golang.org/grpc/README.md @@ -7,7 +7,7 @@ The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open s Installation ------------ -To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: +To install this package, you need to install Go and setup your Go workspace on your computer. 
The simplest way to install the library is to run: ``` $ go get google.golang.org/grpc @@ -16,7 +16,7 @@ $ go get google.golang.org/grpc Prerequisites ------------- -This requires Go 1.4 or above. +This requires Go 1.5 or later . Constraints ----------- diff --git a/vendor/src/google.golang.org/grpc/backoff.go b/vendor/src/google.golang.org/grpc/backoff.go new file mode 100644 index 0000000000..dc4858ebe3 --- /dev/null +++ b/vendor/src/google.golang.org/grpc/backoff.go @@ -0,0 +1,80 @@ +package grpc + +import ( + "math/rand" + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var ( + DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, + } +) + +// backoffStrategy defines the methodology for backing off after a grpc +// connection failure. +// +// This is unexported until the GRPC project decides whether or not to allow +// alternative backoff strategies. Once a decision is made, this type and its +// method may be exported. +type backoffStrategy interface { + // backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + backoff(retries int) time.Duration +} + +// BackoffConfig defines the parameters for the default GRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration + + // TODO(stevvooe): The following fields are not exported, as allowing + // changes would violate the current GRPC specification for backoff. If + // GRPC decides to allow more interesting backoff strategies, these fields + // may be opened up in the future. + + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay time.Duration + + // factor is applied to the backoff after each retry. + factor float64 + + // jitter provides a range to randomize backoff delays. + jitter float64 +} + +func setDefaults(bc *BackoffConfig) { + md := bc.MaxDelay + *bc = DefaultBackoffConfig + + if md > 0 { + bc.MaxDelay = md + } +} + +func (bc BackoffConfig) backoff(retries int) (t time.Duration) { + if retries == 0 { + return bc.baseDelay + } + backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/src/google.golang.org/grpc/call.go b/vendor/src/google.golang.org/grpc/call.go index 9d815af39d..9d0fc8eeea 100644 --- a/vendor/src/google.golang.org/grpc/call.go +++ b/vendor/src/google.golang.org/grpc/call.go @@ -34,13 +34,13 @@ package grpc import ( + "bytes" "io" "time" "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) @@ -48,16 +48,16 @@ import ( // On error, it returns the error and indicates whether the call should be retried. // // TODO(zhaoq): Check whether the received message sequence is valid. 
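The new BackoffConfig above follows the gRPC connection-backoff document: start at baseDelay, multiply by factor for each consecutive failure, cap at MaxDelay, then spread the result by ±jitter. A worked sketch of that computation with the default parameters (1s base, factor 1.6, 20% jitter, 120s cap) baked in:

```
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay mirrors BackoffConfig.backoff from backoff.go above, with the
// DefaultBackoffConfig values inlined for illustration.
func backoffDelay(retries int) time.Duration {
	const (
		baseDelay = 1 * time.Second
		maxDelay  = 120 * time.Second
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return baseDelay
	}
	backoff, max := float64(baseDelay), float64(maxDelay)
	for backoff < max && retries > 0 {
		backoff *= factor
		retries--
	}
	if backoff > max {
		backoff = max
	}
	// Randomize so a burst of failing clients does not retry in lockstep.
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	// Roughly 1s, 1.6s, 2.56s, 4.1s, 6.55s, each within ±20%.
	for retries := 0; retries <= 4; retries++ {
		fmt.Println(retries, backoffDelay(retries))
	}
}
```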
-func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { +func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // Try to acquire header metadata from the server if there is any. var err error c.headerMD, err = stream.Header() if err != nil { return err } - p := &parser{s: stream} + p := &parser{r: stream} for { - if err = recv(p, codec, reply); err != nil { + if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil { if err == io.EOF { break } @@ -69,7 +69,7 @@ func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream } // sendRequest writes out various information of an RPC such as Context and Message. -func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { +func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { stream, err := t.NewStream(ctx, callHdr) if err != nil { return nil, err @@ -81,8 +81,11 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t } } }() - // TODO(zhaoq): Support compression. - outBuf, err := encode(codec, args, compressionNone) + var cbuf *bytes.Buffer + if compressor != nil { + cbuf = new(bytes.Buffer) + } + outBuf, err := encode(codec, args, compressor, cbuf) if err != nil { return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) } @@ -94,16 +97,9 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t return stream, nil } -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - traceInfo traceInfo // in trace.go -} - -// Invoke is called by the generated code. It sends the RPC request on the -// wire and returns after response is received. +// Invoke sends the RPC request on the wire and returns after response is received. +// Invoke is called by generated code. Also users can call Invoke directly when it +// is really needed in their use cases. 
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { var c callInfo for _, o := range opts { @@ -153,6 +149,9 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli Host: cc.authority, Method: method, } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } t, err = cc.dopts.picker.Pick(ctx) if err != nil { if lastErr != nil { @@ -164,7 +163,7 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) } - stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) + stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) if err != nil { if _, ok := err.(transport.ConnectionError); ok { lastErr = err @@ -176,7 +175,7 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli return toRPCErr(err) } // Receive the response - lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply) + lastErr = recvResponse(cc.dopts, t, &c, stream, reply) if _, ok := lastErr.(transport.ConnectionError); ok { continue } @@ -187,6 +186,6 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if lastErr != nil { return toRPCErr(lastErr) } - return Errorf(stream.StatusCode(), stream.StatusDesc()) + return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) } } diff --git a/vendor/src/google.golang.org/grpc/clientconn.go b/vendor/src/google.golang.org/grpc/clientconn.go index bf66914c11..6de86e9e4c 100644 --- a/vendor/src/google.golang.org/grpc/clientconn.go +++ b/vendor/src/google.golang.org/grpc/clientconn.go @@ -52,10 +52,10 @@ var ( // ErrUnspecTarget indicates that the target address is unspecified. ErrUnspecTarget = errors.New("grpc: target is unspecified") // ErrNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicityly + // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // ErrCredentialsMisuse indicates that users want to transmit security infomation + // ErrCredentialsMisuse indicates that users want to transmit security information // (e.g., oauth2 token) which requires secure connection on an insecure // connection. ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") @@ -73,6 +73,9 @@ var ( // values passed to Dial. type dialOptions struct { codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy picker Picker block bool insecure bool @@ -89,12 +92,57 @@ func WithCodec(c Codec) DialOption { } } +// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message +// compressor. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating +// message decompressor. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithPicker returns a DialOption which sets a picker for connection selection. 
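The compressor and decompressor dial options added above are wired in at Dial time. A minimal client-side sketch combining them with the gzip implementations added in rpc_util.go; the target address is a placeholder and WithInsecure is only appropriate against a plaintext test server:

```
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "localhost:50051" is a placeholder target for illustration.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),     // gzip outgoing messages
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()), // accept gzip replies
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
	// conn is then handed to generated client stubs, or used directly
	// with grpc.Invoke for unary calls.
}
```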
func WithPicker(p Picker) DialOption { return func(o *dialOptions) { o.picker = p } } +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + // Set defaults to ensure that provided BackoffConfig is valid and + // unexported fields get default values. + setDefaults(&b) + return withBackoff(b) +} + +// withBackoff sets the backoff strategy used for retries after a +// failed connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by GRPC. +func withBackoff(bs backoffStrategy) DialOption { + return func(o *dialOptions) { + o.bs = bs + } +} + // WithBlock returns a DialOption which makes caller of Dial blocks until the underlying // connection is up. Without this, Dial returns immediately and connecting the server // happens in background. @@ -104,6 +152,8 @@ func WithBlock() DialOption { } } +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. func WithInsecure() DialOption { return func(o *dialOptions) { o.insecure = true @@ -159,6 +209,11 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // Set the default codec. cc.dopts.codec = protoCodec{} } + + if cc.dopts.bs == nil { + cc.dopts.bs = DefaultBackoffConfig + } + if cc.dopts.picker == nil { cc.dopts.picker = &unicastPicker{ target: target, @@ -267,10 +322,9 @@ func NewConn(cc *ClientConn) (*Conn, error) { if !c.dopts.insecure { var ok bool for _, cd := range c.dopts.copts.AuthOptions { - if _, ok := cd.(credentials.TransportAuthenticator); !ok { - continue + if _, ok = cd.(credentials.TransportAuthenticator); ok { + break } - ok = true } if !ok { return nil, ErrNoTransportSecurity @@ -395,7 +449,7 @@ func (cc *Conn) resetTransport(closeTransport bool) error { return ErrClientConnTimeout } } - sleepTime := backoff(retries) + sleepTime := cc.dopts.bs.backoff(retries) timeout := sleepTime if timeout < minConnectTimeout { timeout = minConnectTimeout @@ -518,8 +572,9 @@ func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { cc.mu.Unlock() return nil, ErrClientConnClosing case cc.state == Ready: + ct := cc.transport cc.mu.Unlock() - return cc.transport, nil + return ct, nil default: ready := cc.ready if ready == nil { diff --git a/vendor/src/google.golang.org/grpc/coverage.sh b/vendor/src/google.golang.org/grpc/coverage.sh index 9de8c918df..120235374a 100755 --- a/vendor/src/google.golang.org/grpc/coverage.sh +++ b/vendor/src/google.golang.org/grpc/coverage.sh @@ -4,15 +4,20 @@ set -e workdir=.cover profile="$workdir/cover.out" -mode=count +mode=set +end2endtest="google.golang.org/grpc/test" generate_cover_data() { rm -rf "$workdir" mkdir "$workdir" for pkg in "$@"; do - f="$workdir/$(echo $pkg | tr / -).cover" - go test -covermode="$mode" -coverprofile="$f" "$pkg" + if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] + then + f="$workdir/$(echo $pkg | tr / -)" + go test 
-covermode="$mode" -coverprofile="$f.cover" "$pkg" + go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" + fi done echo "mode: $mode" >"$profile" @@ -32,6 +37,8 @@ show_cover_report func case "$1" in "") ;; +--html) + show_cover_report html ;; --coveralls) push_to_coveralls ;; *) diff --git a/vendor/src/google.golang.org/grpc/credentials/credentials.go b/vendor/src/google.golang.org/grpc/credentials/credentials.go index cde38dc4e4..681f64e445 100644 --- a/vendor/src/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/src/google.golang.org/grpc/credentials/credentials.go @@ -87,19 +87,6 @@ type AuthInfo interface { AuthType() string } -type authInfoKey struct{} - -// NewContext creates a new context with authInfo attached. -func NewContext(ctx context.Context, authInfo AuthInfo) context.Context { - return context.WithValue(ctx, authInfoKey{}, authInfo) -} - -// FromContext returns the authInfo in ctx if it exists. -func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) { - authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo) - return -} - // TransportAuthenticator defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportAuthenticator interface { diff --git a/vendor/src/google.golang.org/grpc/doc.go b/vendor/src/google.golang.org/grpc/doc.go index c63847745d..a35f218852 100644 --- a/vendor/src/google.golang.org/grpc/doc.go +++ b/vendor/src/google.golang.org/grpc/doc.go @@ -1,6 +1,6 @@ /* Package grpc implements an RPC system called gRPC. -See https://github.com/grpc/grpc for more information about gRPC. +See www.grpc.io for more information about gRPC. */ package grpc // import "google.golang.org/grpc" diff --git a/vendor/src/google.golang.org/grpc/grpclog/logger.go b/vendor/src/google.golang.org/grpc/grpclog/logger.go index ec089f70f8..3b2933079e 100644 --- a/vendor/src/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/src/google.golang.org/grpc/grpclog/logger.go @@ -42,6 +42,8 @@ import ( ) // Use golang's standard logger by default. +// Access is not mutex-protected: do not modify except in init() +// functions. var logger Logger = log.New(os.Stderr, "", log.LstdFlags) // Logger mimics golang's standard Logger as an interface. @@ -54,7 +56,8 @@ type Logger interface { Println(args ...interface{}) } -// SetLogger sets the logger that is used in grpc. +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. func SetLogger(l Logger) { logger = l } diff --git a/vendor/src/google.golang.org/grpc/interceptor.go b/vendor/src/google.golang.org/grpc/interceptor.go new file mode 100644 index 0000000000..588f59e5ab --- /dev/null +++ b/vendor/src/google.golang.org/grpc/interceptor.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/src/google.golang.org/grpc/internal/internal.go b/vendor/src/google.golang.org/grpc/internal/internal.go new file mode 100644 index 0000000000..5489143a85 --- /dev/null +++ b/vendor/src/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,49 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/src/google.golang.org/grpc/peer/peer.go b/vendor/src/google.golang.org/grpc/peer/peer.go new file mode 100644 index 0000000000..bfa6205ba9 --- /dev/null +++ b/vendor/src/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/src/google.golang.org/grpc/picker.go b/vendor/src/google.golang.org/grpc/picker.go index b83c859b43..50f315b44f 100644 --- a/vendor/src/google.golang.org/grpc/picker.go +++ b/vendor/src/google.golang.org/grpc/picker.go @@ -172,7 +172,7 @@ func (p *unicastNamingPicker) processUpdates() error { } p.mu.Unlock() default: - grpclog.Println("Unknown update.Op %d", update.Op) + grpclog.Println("Unknown update.Op ", update.Op) } } return nil diff --git a/vendor/src/google.golang.org/grpc/rpc_util.go b/vendor/src/google.golang.org/grpc/rpc_util.go index 46a6801b07..3192f01185 100644 --- a/vendor/src/google.golang.org/grpc/rpc_util.go +++ b/vendor/src/google.golang.org/grpc/rpc_util.go @@ -34,13 +34,14 @@ package grpc import ( + "bytes" + "compress/gzip" "encoding/binary" "fmt" "io" + "io/ioutil" "math" - "math/rand" "os" - "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" @@ -75,6 +76,71 @@ func (protoCodec) String() string { return "proto" } +// Compressor defines the interface gRPC uses to compress a message. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{} +} + +type gzipCompressor struct { +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := gzip.NewWriter(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. 
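The new peer package above carries the remote address and transport auth info through the context, and the UnaryServerInterceptor hook added in interceptor.go gives server code a place to read it. A minimal sketch of an interceptor that logs the caller; registering it assumes the UnaryInterceptor server option defined in server.go (outside this excerpt):

```
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/peer"
)

// peerLogger logs the caller's address and method before delegating to the
// real handler; it matches the UnaryServerInterceptor signature above.
func peerLogger(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("unary call %s from %v", info.FullMethod, p.Addr)
	}
	return handler(ctx, req)
}

func main() {
	// grpc.UnaryInterceptor is assumed here; it lives in server.go, which is
	// part of this vendor bump but not shown in this hunk.
	s := grpc.NewServer(grpc.UnaryInterceptor(peerLogger))
	_ = s // register services and call s.Serve(lis) in a real server
}
```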
+func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + z, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + defer z.Close() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + traceInfo traceInfo // in trace.go +} + // CallOption configures a Call before it starts or extracts information from // a Call after it completes. type CallOption interface { @@ -118,36 +184,49 @@ type payloadFormat uint8 const ( compressionNone payloadFormat = iota // no compression - compressionFlate - // More formats + compressionMade ) // parser reads complelete gRPC messages from the underlying reader. type parser struct { - s io.Reader -} + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader -// recvMsg is to read a complete gRPC message from the stream. It is blocking if -// the message has not been complete yet. It returns the message and its type, -// EOF is returned with nil msg and 0 pf if the entire stream is done. Other -// non-nil error is returned if something is wrong on reading. -func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { // The header of a gRPC message. Find more detail // at http://www.grpc.io/docs/guides/wire.html. - var buf [5]byte + header [5]byte +} - if _, err := io.ReadFull(p.s, buf[:]); err != nil { +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { return 0, nil, err } - pf = payloadFormat(buf[0]) - length := binary.BigEndian.Uint32(buf[1:]) + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { return pf, nil, nil } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.s, msg); err != nil { + if _, err := io.ReadFull(p.r, msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -158,7 +237,7 @@ func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { // encode serializes msg and prepends the message header. If msg is nil, it // generates the message header of 0 message length. 
-func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { var b []byte var length uint if msg != nil { @@ -168,6 +247,12 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { if err != nil { return nil, err } + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, err + } + b = cbuf.Bytes() + } length = uint(len(b)) } if length > math.MaxUint32 { @@ -182,7 +267,11 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { var buf = make([]byte, payloadLen+sizeLen+len(b)) // Write payload format - buf[0] = byte(pf) + if cp == nil { + buf[0] = byte(compressionNone) + } else { + buf[0] = byte(compressionMade) + } // Write length of b into buf binary.BigEndian.PutUint32(buf[1:], uint32(length)) // Copy encoded msg to buf @@ -191,22 +280,38 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { return buf, nil } -func recv(p *parser, c Codec, m interface{}) error { +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress) + } + if dc == nil || recvCompress != dc.Type() { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error { pf, d, err := p.recvMsg() if err != nil { return err } - switch pf { - case compressionNone: - if err := c.Unmarshal(d, m); err != nil { - if rErr, ok := err.(rpcError); ok { - return rErr - } else { - return Errorf(codes.Internal, "grpc: %v", err) - } + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - default: - return Errorf(codes.Internal, "gprc: compression is not supported yet.") + } + if err := c.Unmarshal(d, m); err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } return nil } @@ -218,7 +323,7 @@ type rpcError struct { } func (e rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) + return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) } // Code returns the error code for err if it was produced by the rpc system. 
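// Illustrative sketch, not part of the vendored diff: the Compressor and
// Decompressor interfaces introduced above can be exercised directly. encode()
// now writes the compressionMade flag and compresses through cp.Do into cbuf,
// and recv()/checkRecvPayload verify the negotiated grpc-encoding name before
// handing the payload to dc.Do. A minimal round trip, assuming only the
// exported constructors added in this file:
package main

import (
	"bytes"
	"fmt"

	"google.golang.org/grpc"
)

func main() {
	cp := grpc.NewGZIPCompressor()
	dc := grpc.NewGZIPDecompressor()

	// Compress into a scratch buffer, the same way encode() reuses cbuf.
	var buf bytes.Buffer
	if err := cp.Do(&buf, []byte("hello gzip")); err != nil {
		panic(err)
	}

	// Decompress, mirroring what recv() does when the payload flag is compressionMade.
	plain, err := dc.Do(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // prints "hello gzip"
}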
@@ -304,34 +409,10 @@ func convertCode(err error) codes.Code { return codes.Unknown } -const ( - // how long to wait after the first failure before retrying - baseDelay = 1.0 * time.Second - // upper bound of backoff delay - maxDelay = 120 * time.Second - // backoff increases by this factor on each retry - backoffFactor = 1.6 - // backoff is randomized downwards by this factor - backoffJitter = 0.2 -) - -func backoff(retries int) (t time.Duration) { - if retries == 0 { - return baseDelay - } - backoff, max := float64(baseDelay), float64(maxDelay) - for backoff < max && retries > 0 { - backoff *= backoffFactor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + backoffJitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} +// SupportPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the grpc package. +// +// This constant may be renamed in the future if a change in the generated code +// requires a synchronised update of grpc-go and protoc-gen-go. This constant +// should not be referenced from any other code. +const SupportPackageIsVersion2 = true diff --git a/vendor/src/google.golang.org/grpc/server.go b/vendor/src/google.golang.org/grpc/server.go index 655e7d865e..d3a8073d9c 100644 --- a/vendor/src/google.golang.org/grpc/server.go +++ b/vendor/src/google.golang.org/grpc/server.go @@ -34,10 +34,12 @@ package grpc import ( + "bytes" "errors" "fmt" "io" "net" + "net/http" "reflect" "runtime" "strings" @@ -45,15 +47,17 @@ import ( "time" "golang.org/x/net/context" + "golang.org/x/net/http2" "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -81,10 +85,11 @@ type service struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts options - mu sync.Mutex + opts options + + mu sync.Mutex // guards following lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + conns map[io.Closer]bool m map[string]*service // service name -> service info events trace.EventLog } @@ -92,7 +97,12 @@ type Server struct { type options struct { creds credentials.Credentials codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor maxConcurrentStreams uint32 + useHandlerImpl bool // use http.Handler-based server } // A ServerOption sets options. @@ -105,6 +115,18 @@ func CustomCodec(codec Codec) ServerOption { } } +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. 
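// Like the RPCCompressor and RPCDecompressor options above, it is applied when
// the server is constructed. An illustrative sketch (not part of the diff) of
// composing the new options:
//
//	s := grpc.NewServer(
//		grpc.MaxConcurrentStreams(100),
//		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
//		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
//	)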
func MaxConcurrentStreams(n uint32) ServerOption { @@ -120,6 +142,29 @@ func Creds(c credentials.Credentials) ServerOption { } } +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor has been set.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor has been set.") + } + o.streamInt = i + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -134,7 +179,7 @@ func NewServer(opt ...ServerOption) *Server { s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[io.Closer]bool), m: make(map[string]*service), } if EnableTracing { @@ -201,9 +246,17 @@ var ( ErrServerStopped = errors.New("grpc: the server has been stopped") ) +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + creds, ok := s.opts.creds.(credentials.TransportAuthenticator) + if !ok { + return rawConn, nil, nil + } + return creds.ServerHandshake(rawConn) +} + // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. The service goroutines -// read gRPC request and then call the registered handlers to reply to them. +// read gRPC requests and then call the registered handlers to reply to them. // Service returns when lis.Accept fails. 
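// An illustrative sketch (not part of the diff) of the usual wiring, combining
// Serve with the UnaryInterceptor option added above; logUnary is a
// hypothetical interceptor matching the UnaryServerInterceptor signature
// declared in interceptor.go (not shown in this section):
//
//	logUnary := func(ctx context.Context, req interface{},
//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
//		log.Printf("unary call %s", info.FullMethod)
//		return handler(ctx, req)
//	}
//	lis, err := net.Listen("tcp", ":50051")
//	if err != nil {
//		log.Fatal(err)
//	}
//	s := grpc.NewServer(grpc.UnaryInterceptor(logUnary))
//	log.Fatal(s.Serve(lis))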
func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() @@ -221,74 +274,167 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() for { - c, err := lis.Accept() + rawConn, err := lis.Accept() if err != nil { s.mu.Lock() s.printf("done serving; Accept = %v", err) s.mu.Unlock() return err } - var authInfo credentials.AuthInfo - if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { - var conn net.Conn - conn, authInfo, err = creds.ServerHandshake(c) - if err != nil { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Println("grpc: Server.Serve failed to complete security handshake.") - continue - } - c = conn - } - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - c.Close() - return nil - } - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) - if err != nil { - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - continue - } - s.conns[st] = true - s.mu.Unlock() - - go func() { - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), - } - trInfo.firstLine.client = false - trInfo.firstLine.remoteAddr = st.RemoteAddr() - stream.TraceContext(trInfo.tr) - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = dl.Sub(time.Now()) - } - } - wg.Add(1) - go func() { - s.handleStream(st, stream, trInfo) - wg.Done() - }() - }) - wg.Wait() - s.mu.Lock() - delete(s.conns, st) - s.mu.Unlock() - }() + // Start a new goroutine to deal with rawConn + // so we don't stall this Accept loop goroutine. + go s.handleRawConn(rawConn) } } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error { - p, err := encode(s.opts.codec, msg, pf) +// handleRawConn is run in its own goroutine and handles a just-accepted +// connection that has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + if s.opts.useHandlerImpl { + s.serveUsingHandler(conn) + } else { + s.serveNewHTTP2Transport(conn, authInfo) + } +} + +// serveNewHTTP2Transport sets up a new http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). 
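// For orientation (descriptive note, not part of the diff), the accept path
// introduced above now fans out as:
//
//	Serve -> handleRawConn -> useTransportAuthenticator
//	        -> serveUsingHandler        (http.Handler path; only when useHandlerImpl is set)
//	        -> serveNewHTTP2Transport -> serveStreams -> handleStream
//
// Both branches end up in serveStreams, so stream handling is shared between
// the low-level http2 transport and the net/http based one.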
+func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + return + } + if !s.addConn(st) { + st.Close() + return + } + s.serveStreams(st) +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer s.removeConn(st) + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + if !s.addConn(conn) { + conn.Close() + return + } + defer s.removeConn(conn) + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + st.Close() + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + trInfo = &traceInfo{ + tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + stream.TraceContext(trInfo.tr) + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + return false + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var cbuf *bytes.Buffer + if cp != nil { + cbuf = new(bytes.Buffer) + } + p, err := encode(s.opts.codec, msg, cp, cbuf) if err != nil { // This typically indicates a fatal issue (e.g., memory // corruption or hardware faults) the application program @@ -314,97 +460,130 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} }() } - p := &parser{s: stream} + p := &parser{r: stream} for { pf, req, err := p.recvMsg() if err == io.EOF { // The entire stream is done (for unary RPC only). return err } + if err == io.ErrUnexpectedEOF { + err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} + } if err != nil { switch err := err.(type) { case transport.ConnectionError: // Nothing to do here. case transport.StreamError: if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) } return err } - switch pf { - case compressionNone: - statusCode := codes.OK - statusDesc := "" - df := func(v interface{}) error { - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return err + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + switch err := err.(type) { + case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df) - if appErr != nil { - if err, ok := appErr.(rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() - } - if trInfo != nil && statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(statusDesc), true) - trInfo.tr.SetError() + default: + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + } + return err + } + statusCode := codes.OK + statusDesc := "" + df := func(v interface{}) error { + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } return err } - return nil } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { - switch err := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - statusCode = err.Code - statusDesc = err.Desc - default: - statusCode = codes.Unknown - statusDesc = err.Error() - } + if err := s.opts.codec.Unmarshal(req, v); err != nil { return err } if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) } - return t.WriteStatus(stream, statusCode, statusDesc) - default: - panic(fmt.Sprintf("payload format to be supported: %d", pf)) + return nil } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + if err, ok := appErr.(rpcError); ok { + statusCode = err.code + statusDesc = err.desc + } else { + statusCode = convertCode(appErr) + statusDesc = appErr.Error() + } + if trInfo != nil && statusCode != codes.OK { + trInfo.tr.LazyLog(stringer(statusDesc), true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err + } + return nil + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: + statusCode = codes.Unknown + statusDesc = err.Error() + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) } } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } ss := &serverStream{ t: t, s: stream, - p: &parser{s: stream}, + p: &parser{r: stream}, codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, trInfo: trInfo, } + if ss.cp != nil { + ss.cbuf = new(bytes.Buffer) + } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) defer func() { @@ -418,10 +597,24 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() }() } - if appErr := sd.Handler(srv.server, ss); appErr != nil { + var appErr error + if s.opts.streamInt == nil { + appErr = sd.Handler(srv.server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) + } + if appErr != nil { if err, ok := appErr.(rpcError); ok { ss.statusCode = err.code ss.statusDesc = err.desc + } else if err, ok := appErr.(transport.StreamError); ok { + ss.statusCode = err.Code + ss.statusDesc = err.Desc } else { ss.statusCode = convertCode(appErr) ss.statusDesc = appErr.Error() @@ -509,8 +702,11 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } } -// Stop stops the gRPC server. Once Stop returns, the server stops accepting -// connection requests and closes all the connected connections. +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. 
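// An illustrative sketch (not part of the diff): callers typically tie Stop to
// process shutdown; closing the listeners makes a concurrent Serve return.
//
//	go func() {
//		<-shutdownCh // hypothetical channel closed on SIGTERM
//		s.Stop()
//	}()
//	err := s.Serve(lis) // returns once Stop closes lis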
func (s *Server) Stop() { s.mu.Lock() listeners := s.lis @@ -518,12 +714,14 @@ func (s *Server) Stop() { cs := s.conns s.conns = nil s.mu.Unlock() + for lis := range listeners { lis.Close() } for c := range cs { c.Close() } + s.mu.Lock() if s.events != nil { s.events.Finish() @@ -532,14 +730,23 @@ func (s *Server) Stop() { s.mu.Unlock() } -// TestingCloseConns closes all exiting transports but keeps s.lis accepting new -// connections. This is for test only now. -func (s *Server) TestingCloseConns() { +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { s.mu.Lock() for c := range s.conns { c.Close() + delete(s.conns, c) } - s.conns = make(map[transport.ServerTransport]bool) s.mu.Unlock() } diff --git a/vendor/src/google.golang.org/grpc/stream.go b/vendor/src/google.golang.org/grpc/stream.go index 0ee572c27b..565fc3cd0d 100644 --- a/vendor/src/google.golang.org/grpc/stream.go +++ b/vendor/src/google.golang.org/grpc/stream.go @@ -34,6 +34,7 @@ package grpc import ( + "bytes" "errors" "io" "sync" @@ -46,12 +47,14 @@ import ( "google.golang.org/grpc/transport" ) -type streamHandler func(srv interface{}, stream ServerStream) error +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. type StreamDesc struct { StreamName string - Handler streamHandler + Handler StreamHandler // At least one of these is true. ServerStreams bool @@ -66,18 +69,19 @@ type Stream interface { // breaks. // On error, it aborts the stream and returns an RPC status on client // side. On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the streama nd returns an RPC status. On + // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. RecvMsg(m interface{}) error } // ClientStream defines the interface a client stream has to satify. type ClientStream interface { - // Header returns the header metedata received from the server if there + // Header returns the header metadata received from the server if there // is any. It blocks if the metadata is not ready to read. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server. 
It must be called @@ -108,12 +112,22 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, + Flush: desc.ServerStreams && desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() } cs := &clientStream{ desc: desc, codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, tracing: EnableTracing, } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cs.cbuf = new(bytes.Buffer) + } if cs.tracing { cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) cs.trInfo.firstLine.client = true @@ -125,16 +139,23 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } s, err := t.NewStream(ctx, callHdr) if err != nil { + cs.finish(err) return nil, toRPCErr(err) } cs.t = t cs.s = s - cs.p = &parser{s: s} + cs.p = &parser{r: s} // Listen on ctx.Done() to detect cancellation when there is no pending // I/O operations on this stream. go func() { - <-s.Context().Done() - cs.closeTransportStream(transport.ContextErr(s.Context().Err())) + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } }() return cs, nil } @@ -146,6 +167,9 @@ type clientStream struct { p *parser desc *StreamDesc codec Codec + cp Compressor + cbuf *bytes.Buffer + dc Decompressor tracing bool // set to EnableTracing when the clientStream is created. @@ -183,6 +207,9 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { cs.mu.Unlock() } defer func() { + if err != nil { + cs.finish(err) + } if err == nil || err == io.EOF { return } @@ -191,7 +218,12 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } err = toRPCErr(err) }() - out, err := encode(cs.codec, m, compressionNone) + out, err := encode(cs.codec, m, cs.cp, cs.cbuf) + defer func() { + if cs.cbuf != nil { + cs.cbuf.Reset() + } + }() if err != nil { return transport.StreamErrorf(codes.Internal, "grpc: %v", err) } @@ -199,7 +231,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -218,16 +250,17 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { return } // Special handling for client streaming rpc. - err = recv(cs.p, cs.codec, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { + cs.finish(err) return nil } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } @@ -239,13 +272,18 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { // Returns io.EOF to indicate the end of the stream. 
return } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } func (cs *clientStream) CloseSend() (err error) { err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() if err == nil || err == io.EOF { return } @@ -303,6 +341,9 @@ type serverStream struct { s *transport.Stream p *parser codec Codec + cp Compressor + dc Decompressor + cbuf *bytes.Buffer statusCode codes.Code statusDesc string trInfo *traceInfo @@ -341,7 +382,12 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - out, err := encode(ss.codec, m, compressionNone) + out, err := encode(ss.codec, m, ss.cp, ss.cbuf) + defer func() { + if ss.cbuf != nil { + ss.cbuf.Reset() + } + }() if err != nil { err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) return err @@ -364,5 +410,5 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - return recv(ss.p, ss.codec, m) + return recv(ss.p, ss.codec, ss.s, ss.dc, m) } diff --git a/vendor/src/google.golang.org/grpc/transport/control.go b/vendor/src/google.golang.org/grpc/transport/control.go index f6b38a5a6d..7e9bdf3350 100644 --- a/vendor/src/google.golang.org/grpc/transport/control.go +++ b/vendor/src/google.golang.org/grpc/transport/control.go @@ -56,43 +56,33 @@ type windowUpdate struct { increment uint32 } -func (windowUpdate) isItem() bool { - return true -} +func (*windowUpdate) item() {} type settings struct { ack bool ss []http2.Setting } -func (settings) isItem() bool { - return true -} +func (*settings) item() {} type resetStream struct { streamID uint32 code http2.ErrCode } -func (resetStream) isItem() bool { - return true -} +func (*resetStream) item() {} type flushIO struct { } -func (flushIO) isItem() bool { - return true -} +func (*flushIO) item() {} type ping struct { ack bool data [8]byte } -func (ping) isItem() bool { - return true -} +func (*ping) item() {} // quotaPool is a pool which accumulates the quota and sends it to acquire() // when it is available. @@ -172,10 +162,6 @@ func (qb *quotaPool) acquire() <-chan int { type inFlow struct { // The inbound flow control limit for pending data. limit uint32 - // conn points to the shared connection-level inFlow that is shared - // by all streams on that conn. It is nil for the inFlow on the conn - // directly. - conn *inFlow mu sync.Mutex // pendingData is the overall data which have been received but not been @@ -186,75 +172,39 @@ type inFlow struct { pendingUpdate uint32 } -// onData is invoked when some data frame is received. It increments not only its -// own pendingData but also that of the associated connection-level flow. +// onData is invoked when some data frame is received. It updates pendingData. func (f *inFlow) onData(n uint32) error { - if n == 0 { - return nil - } f.mu.Lock() defer f.mu.Unlock() - if f.pendingData+f.pendingUpdate+n > f.limit { - return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit) - } - if f.conn != nil { - if err := f.conn.onData(n); err != nil { - return ConnectionErrorf("%v", err) - } - } f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit { + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) + } return nil } -// connOnRead updates the connection level states when the application consumes data. 
-func (f *inFlow) connOnRead(n uint32) uint32 { - if n == 0 || f.conn != nil { - return 0 - } +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { f.mu.Lock() defer f.mu.Unlock() + if f.pendingData == 0 { + return 0 + } f.pendingData -= n f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { - ret := f.pendingUpdate + wu := f.pendingUpdate f.pendingUpdate = 0 - return ret + return wu } return 0 } -// onRead is invoked when the application reads the data. It returns the window updates -// for both stream and connection level. -func (f *inFlow) onRead(n uint32) (swu, cwu uint32) { - if n == 0 { - return - } - f.mu.Lock() - defer f.mu.Unlock() - if f.pendingData == 0 { - // pendingData has been adjusted by restoreConn. - return - } - f.pendingData -= n - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - swu = f.pendingUpdate - f.pendingUpdate = 0 - } - cwu = f.conn.connOnRead(n) - return -} - -// restoreConn is invoked when a stream is terminated. It removes its stake in -// the connection-level flow and resets its own state. -func (f *inFlow) restoreConn() uint32 { - if f.conn == nil { - return 0 - } +func (f *inFlow) resetPendingData() uint32 { f.mu.Lock() defer f.mu.Unlock() n := f.pendingData f.pendingData = 0 - f.pendingUpdate = 0 - return f.conn.connOnRead(n) + return n } diff --git a/vendor/src/google.golang.org/grpc/transport/handler_server.go b/vendor/src/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 0000000000..fef541dba4 --- /dev/null +++ b/vendor/src/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,383 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. 
It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := timeoutDecode(v) + if err != nil { + return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + var metakv []string + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) { + continue + } + for _, v := range vv { + if k == "user-agent" { + // user-agent is special. Copying logic of http_util.go. + if i := strings.LastIndex(v, " "); i == -1 { + // There is no application user agent string being set + continue + } else { + v = v[:i] + } + } + metakv = append(metakv, k, v) + + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. 
+type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) + if statusDesc != "" { + h.Set("Grpc-Message", statusDesc) + } + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + for _, v := range vv { + // http2 ResponseWriter mechanism to + // send undeclared Trailers after the + // headers have possibly been written. + h.Add(http2.TrailerPrefix+k, v) + } + } + } + }) + close(ht.writes) + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", "application/grpc") + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + for _, v := range vv { + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + var ctx context.Context + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. 
+ requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + windowHandler: func(int) {}, // nothing + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{*req.TLS} + } + ctx = metadata.NewContext(ctx, ht.headerMD) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) + s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(&recvMsg{data: buf[:n:n]}) + buf = buf[n:] + } + if err != nil { + s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return ConnectionError{Desc: err.Error()} +} diff --git a/vendor/src/google.golang.org/grpc/transport/http2_client.go b/vendor/src/google.golang.org/grpc/transport/http2_client.go index 07b0c11710..8082fdc8e7 100644 --- a/vendor/src/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/src/google.golang.org/grpc/transport/http2_client.go @@ -50,6 +50,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // http2Client implements the ClientTransport interface with HTTP2. @@ -139,29 +140,6 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e conn.Close() } }() - // Send connection preface to server. 
- n, err := conn.Write(clientPreface) - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - if n != len(clientPreface) { - return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - framer := newFramer(conn) - if initialWindowSize != defaultWindowSize { - err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) - } else { - err = framer.writeSettings(true) - } - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - } ua := primaryUA if opts.UserAgent != "" { ua = opts.UserAgent + " " + ua @@ -177,7 +155,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e writableChan: make(chan int, 1), shutdownChan: make(chan struct{}), errorChan: make(chan struct{}), - framer: framer, + framer: newFramer(conn), hBuf: &buf, hEnc: hpack.NewEncoder(&buf), controlBuf: newRecvBuffer(), @@ -190,27 +168,49 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e maxStreams: math.MaxInt32, streamSendQuota: defaultWindowSize, } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, ConnectionErrorf("transport: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + if initialWindowSize != defaultWindowSize { + err = t.framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + } else { + err = t.framer.writeSettings(true) + } + if err != nil { + t.Close() + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { + t.Close() + return nil, ConnectionErrorf("transport: %v", err) + } + } go t.controller() t.writableChan <- 0 - // Start the reader goroutine for incoming message. The threading model - // on receiving is that each transport has a dedicated goroutine which - // reads HTTP2 frame from network. Then it dispatches the frame to the - // corresponding stream entity. - go t.reader() return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
s := &Stream{ id: t.nextID, method: callHdr.Method, + sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), - fc: fc, + fc: &inFlow{limit: initialWindowSize}, sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), headerChan: make(chan struct{}), } @@ -234,14 +234,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea var timeout time.Duration if dl, ok := ctx.Deadline(); ok { timeout = dl.Sub(time.Now()) - if timeout <= 0 { - return nil, ContextErr(context.DeadlineExceeded) - } + } + select { + case <-ctx.Done(): + return nil, ContextErr(ctx.Err()) + default: + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), } // Attach Auth info if there is any. if t.authInfo != nil { - ctx = credentials.NewContext(ctx, t.authInfo) + pr.AuthInfo = t.authInfo } + ctx = peer.NewContext(ctx, pr) authData := make(map[string]string) for _, c := range t.authCreds { // Construct URI required to get auth request metadata. @@ -317,10 +323,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.SendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } if timeout > 0 { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) } for k, v := range authData { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } var ( @@ -344,6 +355,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } else { endHeaders = true } + var flush bool + if endHeaders && (hasMD || callHdr.Flush) { + flush = true + } if first { // Sends a HeadersFrame to server to start a new stream. p := http2.HeadersFrameParam{ @@ -355,11 +370,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // Do a force flush for the buffered frames iff it is the last headers frame // and there is header metadata to be sent. Otherwise, there is flushing until // the corresponding data frame is written. - err = t.framer.writeHeaders(hasMD && endHeaders, p) + err = t.framer.writeHeaders(flush, p) first = false } else { // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size)) + err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) } if err != nil { t.notifyError(err) @@ -389,8 +404,10 @@ func (t *http2Client) CloseStream(s *Stream, err error) { // other goroutines. s.cancel() s.mu.Lock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) + if q := s.fc.resetPendingData(); q > 0 { + if n := t.fc.onRead(q); n > 0 { + t.controlBuf.put(&windowUpdate{0, n}) + } } if s.state == streamDone { s.mu.Unlock() @@ -412,6 +429,9 @@ func (t *http2Client) CloseStream(s *Stream, err error) { // accessed any more. func (t *http2Client) Close() (err error) { t.mu.Lock() + if t.state == reachable { + close(t.errorChan) + } if t.state == closing { t.mu.Unlock() return errors.New("transport: Close() was already called") @@ -490,6 +510,10 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the transport. 
if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok { + // Return the connection quota back. + t.sendQuotaPool.add(len(p)) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues @@ -499,6 +523,16 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(len(p)) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { // Do a force flush iff this is last frame for the entire gRPC message // and the caller is the only writer at this moment. @@ -537,47 +571,52 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - if s, ok := t.activeStreams[f.Header().StreamID]; ok { - return s, true - } - return nil, false + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Client) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Client) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + t.notifyError(ConnectionErrorf("%v", err)) + return + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + return + } if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } s.state = streamDone s.statusCode = codes.Internal s.statusDesc = err.Error() @@ -586,6 +625,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? 
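// Descriptive note (not part of the diff): with the reworked inFlow, stream and
// connection windows are now accounted for separately, and updateWindow above
// queues a windowUpdate only once a quarter of the window has been consumed.
// Inside package transport the bookkeeping behaves roughly like:
//
//	f := &inFlow{limit: 65535}
//	_ = f.onData(16 * 1024)   // peer sent 16 KiB; pendingData grows
//	wu := f.onRead(16 * 1024) // application consumed it
//	// wu == 16384 because pendingUpdate crossed limit/4; smaller reads
//	// return 0 and keep accumulating until the threshold is reached.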
@@ -624,9 +664,10 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { close(s.headerChan) s.headerDone = true } - s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] + s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + s.statusCode = codes.Unknown } s.mu.Unlock() s.write(recvMsg{err: io.EOF}) @@ -667,52 +708,59 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). -func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeClientHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return } - if err != nil { - s.write(recvMsg{err: err}) + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if state.err != nil { + s.write(recvMsg{err: state.err}) // Something wrong. Stops reading even when there is remaining. - return nil - } - if !endHeaders { - return s + return } + endStream := frame.StreamEnded() + s.mu.Lock() + if !endStream { + s.recvCompress = state.encoding + } if !s.headerDone { - if !endStream && len(hDec.state.mdata) > 0 { - s.header = hDec.state.mdata + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata } close(s.headerChan) s.headerDone = true } if !endStream || s.state == streamDone { s.mu.Unlock() - return nil + return } - if len(hDec.state.mdata) > 0 { - s.trailer = hDec.state.mdata + if len(state.mdata) > 0 { + s.trailer = state.mdata } s.state = streamDone - s.statusCode = hDec.state.statusCode - s.statusDesc = hDec.state.statusDesc + s.statusCode = state.statusCode + s.statusDesc = state.statusDesc s.mu.Unlock() s.write(recvMsg{err: io.EOF}) - return nil +} + +func handleMalformedHTTP2(s *Stream, err error) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) } // reader runs as a separate goroutine in charge of reading data from network @@ -735,25 +783,31 @@ func (t *http2Client) reader() { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.readFrame() if err != nil { - t.notifyError(err) - return + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + } + continue + } else { + // Transport error. 
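handleRSTStream now falls back to codes.Unknown when the server sends an HTTP/2 error code with no entry in the conversion table, instead of leaving the status zero-valued. A compressed sketch of that lookup-with-fallback; the table here is abbreviated, and the full http2ErrConvTab appears in http_util.go later in this patch.

package sketch

import (
	"golang.org/x/net/http2"

	"google.golang.org/grpc/codes"
)

// grpcCodeForRST maps an HTTP/2 RST_STREAM error code to a gRPC status code,
// defaulting to codes.Unknown for anything unmapped. Only a few entries are
// shown; the vendored table covers every http2.ErrCode.
func grpcCodeForRST(ec http2.ErrCode) codes.Code {
	tab := map[http2.ErrCode]codes.Code{
		http2.ErrCodeCancel:          codes.Canceled,
		http2.ErrCodeRefusedStream:   codes.Unavailable,
		http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
	}
	if c, ok := tab[ec]; ok {
		return c
	}
	return codes.Unknown
}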
+ t.notifyError(err) + return + } } switch frame := frame.(type) { - case *http2.HeadersFrame: - // operateHeaders has to be invoked regardless the value of curStream - // because the HPACK decoder needs to be updated using the received - // headers. - curStream, _ = t.getStream(frame) - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded()) + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: diff --git a/vendor/src/google.golang.org/grpc/transport/http2_server.go b/vendor/src/google.golang.org/grpc/transport/http2_server.go index e16c63cc93..21b631168c 100644 --- a/vendor/src/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/src/google.golang.org/grpc/transport/http2_server.go @@ -49,6 +49,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // ErrIllegalHeaderWrite indicates that setting header is illegal because of @@ -61,8 +62,8 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. + // A writer acquires the write lock by receiving a value on writableChan + // and releases it by sending on writableChan. writableChan chan int // shutdownChan is closed when Close is called. // Blocking operations should select on shutdownChan to avoid @@ -135,66 +136,69 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI return t, nil } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). -func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeServerHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) { + buf := newRecvBuffer() + s := &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: &inFlow{limit: initialWindowSize}, } - if err != nil { - grpclog.Printf("transport: http2Server.operateHeader found %v", err) + + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if err := state.err; err != nil { if se, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } - return nil + return } - if endStream { + + if frame.StreamEnded() { // s is just created by the caller. No lock needed. 
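The reader loop no longer tracks CONTINUATION frames or a pending stream because the framer is now configured with an HPACK decoder (see the newFramer change in http_util.go below): golang.org/x/net/http2 then coalesces HEADERS plus CONTINUATION into a single, already-decoded *http2.MetaHeadersFrame. A rough sketch of that framer behavior on an established connection; error handling and the other frame types are elided.

package sketch

import (
	"fmt"
	"net"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

// readHeaders shows how setting ReadMetaHeaders makes ReadFrame deliver fully
// decoded header fields instead of raw HEADERS/CONTINUATION block fragments.
func readHeaders(conn net.Conn) {
	fr := http2.NewFramer(conn, conn)
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil) // 4096 is the HTTP/2 default table size
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return
		}
		if mh, ok := f.(*http2.MetaHeadersFrame); ok {
			for _, hf := range mh.Fields { // already HPACK-decoded
				fmt.Println(hf.Name, hf.Value)
			}
		}
	}
}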
s.state = streamReadDone } - if !endHeaders { - return s - } - if hDec.state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) + s.recvCompress = state.encoding + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) } else { s.ctx, s.cancel = context.WithCancel(context.TODO()) } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } // Attach Auth info if there is any. if t.authInfo != nil { - s.ctx = credentials.NewContext(s.ctx, t.authInfo) + pr.AuthInfo = t.authInfo } + s.ctx = peer.NewContext(s.ctx, pr) // Cache the current stream to the context so that the server application // can find out. Required when the server wants to send some metadata // back to the client (unary call only). s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. - if len(hDec.state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) + if len(state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, state.mdata) } s.dec = &recvBufferReader{ ctx: s.ctx, recv: s.buf, } - s.method = hDec.state.method + s.recvCompress = state.encoding + s.method = state.method t.mu.Lock() if t.state != reachable { t.mu.Unlock() - return nil + return } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return nil + return } s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) t.activeStreams[s.id] = s @@ -203,7 +207,6 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header t.updateWindow(s, uint32(n)) } handle(s) - return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -236,16 +239,24 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream for { frame, err := t.framer.readFrame() if err != nil { + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s) + } + t.controlBuf.put(&resetStream{se.StreamID, se.Code}) + continue + } t.Close() return } switch frame := frame.(type) { - case *http2.HeadersFrame: + case *http2.MetaHeadersFrame: id := frame.Header().StreamID if id%2 != 1 || id <= t.maxStreamID { // illegal gRPC stream id. @@ -254,21 +265,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { break } t.maxStreamID = id - buf := newRecvBuffer() - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } - curStream = &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: fc, - } - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded(), handle) + t.operateHeaders(frame, handle) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -306,33 +303,51 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. 
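On the server side, operateHeaders now attaches both the peer information and the decoded metadata to the stream context before handing the stream to the handler. A small sketch of what that enables further up the stack, assuming the vendored metadata package; the handler shape and the "authorization" key are illustrative only.

package sketch

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/metadata"
)

// readClientMetadata recovers the headers that operateHeaders stored with
// metadata.NewContext. The key used here is just an example.
func readClientMetadata(ctx context.Context) []string {
	md, ok := metadata.FromContext(ctx)
	if !ok {
		return nil
	}
	return md["authorization"]
}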
func (t *http2Server) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Server) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.mu.Unlock() t.closeStream(s) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -441,6 +456,9 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } for k, v := range md { for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) @@ -503,6 +521,10 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { // TODO(zhaoq): Support multi-writers for a single stream. var writeHeaderFrame bool s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return StreamErrorf(codes.Unknown, "the stream has been done") + } if !s.headerOk { writeHeaderFrame = true s.headerOk = true @@ -515,6 +537,9 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } p := http2.HeadersFrameParam{ StreamID: s.id, BlockFragment: t.hBuf.Bytes(), @@ -567,6 +592,10 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { // Got some quota. Try to acquire writing privilege on the // transport. if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok { + // Return the connection quota back. + t.sendQuotaPool.add(ps) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. 
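WriteHeader and Write now advertise the chosen compressor to the client by emitting a grpc-encoding header field next to :status and content-type. A sketch of the header block being built, assuming the x/net hpack encoder vendored in this patch; "gzip" is only an example value.

package sketch

import (
	"bytes"

	"golang.org/x/net/http2/hpack"
)

// buildResponseHeaders assembles the HPACK block fragment the server sends in
// its HEADERS frame when a send compressor is configured on the stream.
func buildResponseHeaders(sendCompress string) []byte {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	if sendCompress != "" {
		enc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: sendCompress})
	}
	return buf.Bytes()
}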
It queues @@ -576,6 +605,16 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(ps) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } var forceFlush bool if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { forceFlush = true @@ -673,20 +712,22 @@ func (t *http2Server) closeStream(s *Stream) { t.mu.Lock() delete(t.activeStreams, s.id) t.mu.Unlock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) - } + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() s.mu.Lock() + if q := s.fc.resetPendingData(); q > 0 { + if w := t.fc.onRead(q); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } if s.state == streamDone { s.mu.Unlock() return } s.state = streamDone s.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() } func (t *http2Server) RemoteAddr() net.Addr { diff --git a/vendor/src/google.golang.org/grpc/transport/http_util.go b/vendor/src/google.golang.org/grpc/transport/http_util.go index fec4e4755d..7a3594acea 100644 --- a/vendor/src/google.golang.org/grpc/transport/http_util.go +++ b/vendor/src/google.golang.org/grpc/transport/http_util.go @@ -62,13 +62,14 @@ const ( ) var ( - clientPreface = []byte(http2.ClientPreface) - http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ http2.ErrCodeNo: codes.Internal, http2.ErrCodeProtocol: codes.Internal, http2.ErrCodeInternal: codes.Internal, http2.ErrCodeFlowControl: codes.ResourceExhausted, http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, http2.ErrCodeFrameSize: codes.Internal, http2.ErrCodeRefusedStream: codes.Unavailable, http2.ErrCodeCancel: codes.Canceled, @@ -76,6 +77,7 @@ var ( http2.ErrCodeConnect: codes.Internal, http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, } statusCodeConvTab = map[codes.Code]http2.ErrCode{ codes.Internal: http2.ErrCodeInternal, @@ -89,6 +91,9 @@ var ( // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { + err error // first error encountered decoding + + encoding string // statusCode caches the stream status received from the trailer // the server sent. Client side only. statusCode codes.Code @@ -101,25 +106,11 @@ type decodeState struct { mdata map[string][]string } -// An hpackDecoder decodes HTTP2 headers which may span multiple frames. -type hpackDecoder struct { - h *hpack.Decoder - state decodeState - err error // The err when decoding -} - -// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. -type headerFrame interface { - Header() http2.FrameHeader - HeaderBlockFragment() []byte - HeadersEnded() bool -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. 
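Both Write paths gain the same pattern after acquiring the write token: re-check the stream context and, if it was cancelled, return the borrowed send quota and release the token before reporting the error. A generic sketch of that non-blocking check; addQuota and writable are stand-ins for the vendored quotaPool and writableChan.

package sketch

import "golang.org/x/net/context"

// checkCancelled mirrors the select added above: on cancellation it refunds the
// send quota and hands the write token back so other writers are not starved.
func checkCancelled(ctx context.Context, addQuota func(int), writable chan int, n int) error {
	select {
	case <-ctx.Done():
		addQuota(n)   // return the connection-level send quota
		writable <- 0 // release the write token for the next writer
		return ctx.Err()
	default:
		return nil
	}
}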
func isReservedHeader(hdr string) bool { - if hdr[0] == ':' { + if hdr != "" && hdr[0] == ':' { return true } switch hdr { @@ -136,98 +127,62 @@ func isReservedHeader(hdr string) bool { } } -func newHPACKDecoder() *hpackDecoder { - d := &hpackDecoder{} - d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { - switch f.Name { - case "content-type": - if !strings.Contains(f.Value, "application/grpc") { - d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header") - return - } - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.state.statusCode = codes.Code(code) - case "grpc-message": - d.state.statusDesc = f.Value - case "grpc-timeout": - d.state.timeoutSet = true - var err error - d.state.timeout, err = timeoutDecode(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) - return - } - case ":path": - d.state.method = f.Value - default: - if !isReservedHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } - if d.state.mdata == nil { - d.state.mdata = make(map[string][]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) +func (d *decodeState) setErr(err error) { + if d.err == nil { + d.err = err + } +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + if !strings.Contains(f.Value, "application/grpc") { + d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) + return + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) + return + } + d.statusCode = codes.Code(code) + case "grpc-message": + d.statusDesc = f.Value + case "grpc-timeout": + d.timeoutSet = true + var err error + d.timeout, err = timeoutDecode(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) + return + } + case ":path": + d.method = f.Value + default: + if !isReservedHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. return } - d.state.mdata[k] = append(d.state.mdata[k], v) + // Extract the application user agent string. 
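The grpc-timeout branch above relies on timeoutDecode to parse the wire format defined by the gRPC HTTP/2 mapping: an ASCII integer followed by a single unit letter. Below is a simplified decoder covering only a few units, meant to illustrate the format rather than copy the vendored implementation.

package sketch

import (
	"fmt"
	"strconv"
	"time"
)

// decodeTimeout parses values such as "200m" (200 milliseconds) or "5S"
// (5 seconds). The full set of units in the gRPC spec also includes
// "M" (minutes) and "H" (hours), omitted here for brevity.
func decodeTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("transport: timeout string too short: %q", s)
	}
	n, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("transport: malformed timeout %q: %v", s, err)
	}
	d := time.Duration(n)
	switch s[len(s)-1] {
	case 'S':
		return d * time.Second, nil
	case 'm':
		return d * time.Millisecond, nil
	case 'u':
		return d * time.Microsecond, nil
	case 'n':
		return d * time.Nanosecond, nil
	default:
		return 0, fmt.Errorf("transport: unknown timeout unit in %q", s)
	}
}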
+ f.Value = f.Value[:i] } + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return + } + d.mdata[k] = append(d.mdata[k], v) } - }) - return d -} - -func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) - } - endHeaders = true - } - - if err == nil && d.err != nil { - err = d.err - } - return -} - -func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) - } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) - } - endHeaders = true - } - - if err == nil && d.err != nil { - err = d.err - } - return } type timeoutUnit uint8 @@ -318,10 +273,11 @@ type framer struct { func newFramer(conn net.Conn) *framer { f := &framer{ - reader: conn, + reader: bufio.NewReaderSize(conn, http2IOBufSize), writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } @@ -449,3 +405,7 @@ func (f *framer) flushWrite() error { func (f *framer) readFrame() (http2.Frame, error) { return f.fr.ReadFrame() } + +func (f *framer) errorDetail() error { + return f.fr.ErrorDetail() +} diff --git a/vendor/src/google.golang.org/grpc/transport/transport.go b/vendor/src/google.golang.org/grpc/transport/transport.go index e1e7f5761a..87fdf532ca 100644 --- a/vendor/src/google.golang.org/grpc/transport/transport.go +++ b/vendor/src/google.golang.org/grpc/transport/transport.go @@ -63,13 +63,11 @@ type recvMsg struct { err error } -func (recvMsg) isItem() bool { - return true -} +func (*recvMsg) item() {} // All items in an out of a recvBuffer should be the same type. type item interface { - isItem() bool + item() } // recvBuffer is an unbounded channel of item. @@ -89,12 +87,14 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r item) { b.mu.Lock() defer b.mu.Unlock() - b.backlog = append(b.backlog, r) - select { - case b.c <- b.backlog[0]: - b.backlog = b.backlog[1:] - default: + if len(b.backlog) == 0 { + select { + case b.c <- r: + return + default: + } } + b.backlog = append(b.backlog, r) } func (b *recvBuffer) load() { @@ -170,11 +170,13 @@ type Stream struct { ctx context.Context cancel context.CancelFunc // method records the associated RPC method of the stream. - method string - buf *recvBuffer - dec io.Reader - fc *inFlow - recvQuota uint32 + method string + recvCompress string + sendCompress string + buf *recvBuffer + dec io.Reader + fc *inFlow + recvQuota uint32 // The accumulated inbound quota pending for window update. 
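The recvBuffer change above keeps the unbounded-channel semantics but only offers an item to the single-slot channel when the backlog is empty, so ordering is preserved without shuffling the backlog on every put. A toy reproduction of that pattern, independent of the transport types.

package sketch

import "sync"

// unbounded is a minimal version of the recvBuffer idea: a one-slot channel
// backed by a slice, so put never blocks and receivers still read from c.
type unbounded struct {
	c       chan interface{}
	mu      sync.Mutex
	backlog []interface{}
}

func newUnbounded() *unbounded { return &unbounded{c: make(chan interface{}, 1)} }

func (b *unbounded) put(v interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v: // fast path: deliver directly when nothing is queued
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v)
}

// load moves the oldest queued item into the channel after a receive.
func (b *unbounded) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}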
updateQuota uint32 // The handler to control the window update procedure for both this @@ -201,6 +203,17 @@ type Stream struct { statusDesc string } +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no // header metadata or iii) the stream is cancelled/expired. @@ -286,20 +299,18 @@ func (s *Stream) Read(p []byte) (n int, err error) { return } -type key int - // The key to save transport.Stream in the context. -const streamKey = key(0) +type streamKey struct{} // newContextWithStream creates a new context from ctx and attaches stream // to it. func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey, stream) + return context.WithValue(ctx, streamKey{}, stream) } // StreamFromContext returns the stream saved in ctx. func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey).(*Stream) + s, ok = ctx.Value(streamKey{}).(*Stream) return } @@ -339,20 +350,40 @@ func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, e // Options provides additional hints and information for message // transmission. type Options struct { - // Indicate whether it is the last piece for this stream. + // Last indicates whether this write is the last piece for + // this stream. Last bool - // The hint to transport impl whether the data could be buffered for - // batching write. Transport impl can feel free to ignore it. + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // Transport implementation may ignore the hint. Delay bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { - Host string // peer host - Method string // the operation to perform on the specified host + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // RecvCompress specifies the compression algorithm applied on + // inbound messages. + RecvCompress string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. The transport may modify the flush decision + // for performance purposes. + Flush bool } -// ClientTransport is the common interface for all gRPC client side transport +// ClientTransport is the common interface for all gRPC client-side transport // implementations. type ClientTransport interface { // Close tears down this transport. Once it returns, the transport @@ -381,21 +412,33 @@ type ClientTransport interface { Error() <-chan struct{} } -// ServerTransport is the common interface for all gRPC server side transport +// ServerTransport is the common interface for all gRPC server-side transport // implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. 
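CallHdr grows RecvCompress, SendCompress and Flush so the gRPC layer can describe compression and flushing behavior when it opens a stream. An illustrative construction, assuming the vendored google.golang.org/grpc/transport package; the host, method and algorithm values are examples, not defaults.

package sketch

import "google.golang.org/grpc/transport"

// newCallHdr builds a CallHdr for a call that compresses outbound messages
// with gzip and asks the transport to flush the HEADERS frame immediately
// instead of waiting for the first data frame.
func newCallHdr() *transport.CallHdr {
	return &transport.CallHdr{
		Host:         "example.com:443",
		Method:       "/helloworld.Greeter/SayHello",
		SendCompress: "gzip", // advertised via the grpc-encoding header
		Flush:        true,   // hint only; the transport may override it
	}
}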
type ServerTransport interface { - // WriteStatus sends the status of a stream to the client. - WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error - // Write sends the data for the given stream. - Write(s *Stream, data []byte, opts *Options) error - // WriteHeader sends the header metedata for the given stream. - WriteHeader(s *Stream, md metadata.MD) error // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. + // WriteStatus is the final call made on a stream and always + // occurs. + WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error + // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error + // RemoteAddr returns the remote network address. RemoteAddr() net.Addr }
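The reordered ServerTransport interface also documents the per-stream call contract: WriteHeader and Write are optional, while WriteStatus is always the final call. A hypothetical wrapper showing that ordering; the metadata key and payload are placeholders.

package sketch

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/transport"
)

// finishStream replies on a stream following the documented order:
// optional header metadata, optional data, then the mandatory status.
func finishStream(t transport.ServerTransport, s *transport.Stream, payload []byte) error {
	if err := t.WriteHeader(s, metadata.MD{"x-served-by": []string{"example"}}); err != nil {
		return err
	}
	if err := t.Write(s, payload, &transport.Options{Last: true}); err != nil {
		return err
	}
	return t.WriteStatus(s, codes.OK, "")
}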