vendor: google.golang.org/grpc v1.53.0

Also updating google.golang.org/genproto:
full diff: 10f96fb3db...7f2fa6fef1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2023-07-20 00:26:26 +02:00
parent 5b138189b9
commit 3cf34c6d9d
No known key found for this signature in database
GPG Key ID: 76698F39D527CE8C
55 changed files with 1042 additions and 665 deletions

View File

@ -74,7 +74,7 @@ require (
golang.org/x/crypto v0.2.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 // indirect
google.golang.org/grpc v1.50.1 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
)

View File

@ -49,7 +49,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -77,11 +76,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5 h1:PqZ3bA4yzwywivzk7PBQWngJp2/PAS0bWRZerKteicY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
@ -122,15 +116,12 @@ github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7Bv
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@ -196,7 +187,6 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -212,13 +202,11 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -352,7 +340,6 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@ -407,7 +394,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
@ -482,7 +468,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@ -546,10 +531,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -569,7 +552,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
@ -672,15 +654,14 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 h1:7YDGQC/0sigNGzsEWyb9s72jTxlFdwVEYNJHbfQ+Dtg=
google.golang.org/genproto v0.0.0-20220706185917-7780775163c4/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@ -694,11 +675,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -711,8 +689,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
@ -732,7 +708,6 @@ gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllE
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -1,4 +1,4 @@
// Copyright 2020 Google LLC
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.12.2
// protoc v3.21.9
// source: google/rpc/status.proto
package status
@ -48,11 +48,13 @@ type Status struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
// The status code, which should be an enum value of
// [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
// by the client.
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.

View File

@ -19,7 +19,7 @@
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -48,7 +48,7 @@ type BackoffConfig struct {
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -110,6 +110,11 @@ type SubConn interface {
UpdateAddresses([]resolver.Address)
// Connect starts the connecting for this SubConn.
Connect()
// GetOrBuildProducer returns a reference to the existing Producer for this
// ProducerBuilder in this SubConn, or, if one does not currently exist,
// creates a new one and returns it. Returns a close function which must
// be called when the Producer is no longer needed.
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
}
// NewSubConnOptions contains options to create new SubConn.
@ -274,6 +279,14 @@ type PickResult struct {
// type, Done may not be called. May be nil if the balancer does not wish
// to be notified when the RPC completes.
Done func(DoneInfo)
// Metadata provides a way for LB policies to inject arbitrary per-call
// metadata. Any metadata returned here will be merged with existing
// metadata added by the client application.
//
// LB policies with child policies are responsible for propagating metadata
// injected by their children to the ClientConn, as part of Pick().
Metatada metadata.MD
}
// TransientFailureError returns e. It exists for backward compatibility and
@ -371,3 +384,21 @@ type ClientConnState struct {
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
// problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state")
// A ProducerBuilder is a simple constructor for a Producer. It is used by the
// SubConn to create producers when needed.
type ProducerBuilder interface {
// Build creates a Producer. The first parameter is always a
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
// associated SubConn), but is declared as interface{} to avoid a
// dependency cycle. Should also return a close function that will be
// called when all references to the Producer have been given up.
Build(grpcClientConnInterface interface{}) (p Producer, close func())
}
// A Producer is a type shared among potentially many consumers. It is
// associated with a SubConn, and an implementation will typically contain
// other methods to provide additional functionality, e.g. configuration or
// subscription registration.
type Producer interface {
}
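
Note: the ProducerBuilder and Producer interfaces added above are consumed through SubConn.GetOrBuildProducer. The following is a minimal illustrative sketch of a builder and its use; the healthProducer names and package are hypothetical and are not part of this diff.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

// healthProducerBuilder builds the producer for a SubConn. Build receives the
// SubConn's grpc.ClientConnInterface (passed as interface{} to avoid a
// dependency cycle) and returns the Producer together with a close function
// that runs once all references to the producer have been released.
type healthProducerBuilder struct{}

func (healthProducerBuilder) Build(cci interface{}) (balancer.Producer, func()) {
	p := &healthProducer{cc: cci.(grpc.ClientConnInterface)}
	return p, func() { /* stop any streams or goroutines owned by p */ }
}

// healthProducer is the value shared among consumers of a single SubConn.
type healthProducer struct {
	cc grpc.ClientConnInterface
}

// An LB policy would obtain (or reuse) the shared producer like so:
//
//	p, closeFn := sc.GetOrBuildProducer(healthProducerBuilder{})
//	defer closeFn() // drop the reference when no longer needed
//	hp := p.(*healthProducer)
//	_ = hp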

View File

@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error {
// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
// - errPicker if the balancer is in TransientFailure,
// - built by the pickerBuilder with all READY SubConns otherwise.
// - errPicker if the balancer is in TransientFailure,
// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
b.picker = NewErrPicker(b.mergeErrors())

View File

@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct {
// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
// - Else if at least one SubConn is Idle, the aggregated state is Idle;
// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
// - Else if at least one SubConn is Idle, the aggregated state is Idle;
// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
//
// Shutdown is not considered.
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
cse.numIdle += updateVal
}
}
return cse.CurrentState()
}
// CurrentState returns the current aggregate conn state by evaluating the counters
func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
// Evaluate.
if cse.numReady > 0 {
return connectivity.Ready
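
For reference, a small standalone sketch of how the aggregation rules above play out (hypothetical program, not part of this diff):

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	var cse balancer.ConnectivityStateEvaluator
	// A SubConn moves Idle -> Connecting: nothing is Ready and at least one
	// SubConn is Connecting, so the aggregate is Connecting.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))
	// The same SubConn moves Connecting -> Ready: the aggregate becomes Ready.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready))
}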

View File

@ -19,17 +19,20 @@
package grpc
import (
"context"
"fmt"
"strings"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
// ccBalancerWrapper sits between the ClientConn and the Balancer.
@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err
}
acbw := &acBalancerWrapper{ac: ac}
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
acbw.ac.mu.Lock()
ac.acbw = acbw
acbw.ac.mu.Unlock()
@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string {
// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
mu sync.Mutex
ac *addrConn
mu sync.Mutex
ac *addrConn
producers map[balancer.ProducerBuilder]*refCountedProducer
}
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
defer acbw.mu.Unlock()
return acbw.ac
}
var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
// ready, returns errSubConnNotReady.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
transport := acbw.ac.getReadyTransport()
if transport == nil {
return nil, errSubConnNotReady
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}
// Invoke performs a unary RPC. If the addrConn is not ready, returns
// errSubConnNotReady.
func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
if err != nil {
return err
}
if err := cs.SendMsg(args); err != nil {
return err
}
return cs.RecvMsg(reply)
}
type refCountedProducer struct {
producer balancer.Producer
refs int // number of current refs to the producer
close func() // underlying producer's close function
}
func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
acbw.mu.Lock()
defer acbw.mu.Unlock()
// Look up existing producer from this builder.
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
p, close := pb.Build(acbw)
pData = &refCountedProducer{producer: p, close: close}
acbw.producers[pb] = pData
}
// Account for this new reference.
pData.refs++
// Return a cleanup function wrapped in a OnceFunc to remove this reference
// and delete the refCountedProducer from the map if the total reference
// count goes to zero.
unref := func() {
acbw.mu.Lock()
pData.refs--
if pData.refs == 0 {
defer pData.close() // Run outside the acbw mutex
delete(acbw.producers, pb)
}
acbw.mu.Unlock()
}
return pData.producer, grpcsync.OnceFunc(unref)
}

View File

@ -18,14 +18,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc-gen-go v1.28.1
// protoc v3.14.0
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
@ -41,10 +40,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Enumerates the type of event
// Note the terminology is different from the RPC semantics
// definition, but the same meaning is expressed here.
@ -261,6 +256,7 @@ type GrpcLogEntry struct {
// according to the type of the log entry.
//
// Types that are assignable to Payload:
//
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
// *GrpcLogEntry_Message
@ -694,12 +690,12 @@ func (x *Message) GetData() []byte {
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
// - entries handled by grpc that are not user visible, such as those
// that begin with 'grpc-' (with exception of grpc-trace-bin)
// or keys like 'lb-token'
// - transport specific entries, including but not limited to:
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
// - entries added for call credentials
// - entries handled by grpc that are not user visible, such as those
// that begin with 'grpc-' (with exception of grpc-trace-bin)
// or keys like 'lb-token'
// - transport specific entries, including but not limited to:
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
// - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because

View File

@ -23,7 +23,7 @@
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
// Experimental
// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.

View File

@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if err != nil {
return nil, err
}
cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts)
cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
if err != nil {
return nil, err
}
@ -503,7 +503,7 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
// GetState returns the connectivity.State of ClientConn.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State {
// the channel is idle. Does not wait for the connection attempts to begin
// before returning.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
// Target returns the target string of the ClientConn.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() {
func (ac *addrConn) connect() error {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
if logger.V(2) {
logger.Infof("connect called on shutdown addrConn; ignoring.")
}
ac.mu.Unlock()
return errConnClosing
}
if ac.state != connectivity.Idle {
if logger.V(2) {
logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
}
ac.mu.Unlock()
return nil
}
@ -831,9 +837,9 @@ func equalAddresses(a, b []resolver.Address) bool {
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()
@ -928,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
return cc.sc.healthCheckConfig
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
Ctx: ctx,
FullMethodName: method,
@ -998,7 +1004,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
// However, if a previously unavailable network becomes available, this may be
// used to trigger an immediate reconnect.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -1228,111 +1234,79 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
// address was not successfully connected, or updates ac appropriately with the
// new transport.
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
// TODO: Delete prefaceReceived and move the logic to wait for it into the
// transport.
prefaceReceived := grpcsync.NewEvent()
connClosed := grpcsync.NewEvent()
addr.ServerName = ac.cc.getServerName(addr)
hctx, hcancel := context.WithCancel(ac.ctx)
hcStarted := false // protected by ac.mu
onClose := func() {
onClose := func(r transport.GoAwayReason) {
ac.mu.Lock()
defer ac.mu.Unlock()
defer connClosed.Fire()
defer hcancel()
if !hcStarted || hctx.Err() != nil {
// We didn't start the health check or set the state to READY, so
// no need to do anything else here.
//
// OR, we have already cancelled the health check context, meaning
// we have already called onClose once for this transport. In this
// case it would be dangerous to clear the transport and update the
// state, since there may be a new transport in this addrConn.
// adjust params based on GoAwayReason
ac.adjustParams(r)
if ac.state == connectivity.Shutdown {
// Already shut down. tearDown() already cleared the transport and
// canceled hctx via ac.ctx, and we expected this connection to be
// closed, so do nothing here.
return
}
hcancel()
if ac.transport == nil {
// We're still connecting to this address, which could error. Do
// not update the connectivity state or resolve; these will happen
// at the end of the tryAllAddrs connection loop in the event of an
// error.
return
}
ac.transport = nil
// Refresh the name resolver
// Refresh the name resolver on any connection loss.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
if ac.state != connectivity.Shutdown {
ac.updateConnectivityState(connectivity.Idle, nil)
}
}
onGoAway := func(r transport.GoAwayReason) {
ac.mu.Lock()
ac.adjustParams(r)
ac.mu.Unlock()
onClose()
// Always go idle and wait for the LB policy to initiate a new
// connection attempt.
ac.updateConnectivityState(connectivity.Idle, nil)
}
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
defer cancel()
copts.ChannelzParentID = ac.channelzID
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
if err != nil {
if logger.V(2) {
logger.Infof("Creating new client transport to %q: %v", addr, err)
}
// newTr is either nil, or closed.
hcancel()
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
return err
}
select {
case <-connectCtx.Done():
// We didn't get the preface in time.
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.state == connectivity.Shutdown {
// This can happen if the subConn was removed while in `Connecting`
// state. tearDown() would have set the state to `Shutdown`, but
// would not have closed the transport since ac.transport would not
// have been set at that point.
//
// We run this in a goroutine because newTr.Close() calls onClose()
// inline, which requires locking ac.mu.
//
// The error we pass to Close() is immaterial since there are no open
// streams at this point, so no trailers with error details will be sent
// out. We just need to pass a non-nil error.
newTr.Close(transport.ErrConnClosing)
if connectCtx.Err() == context.DeadlineExceeded {
err := errors.New("failed to receive server preface within timeout")
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
return err
}
go newTr.Close(transport.ErrConnClosing)
return nil
case <-prefaceReceived.Done():
// We got the preface - huzzah! things are good.
ac.mu.Lock()
defer ac.mu.Unlock()
if connClosed.HasFired() {
// onClose called first; go idle but do nothing else.
if ac.state != connectivity.Shutdown {
ac.updateConnectivityState(connectivity.Idle, nil)
}
return nil
}
if ac.state == connectivity.Shutdown {
// This can happen if the subConn was removed while in `Connecting`
// state. tearDown() would have set the state to `Shutdown`, but
// would not have closed the transport since ac.transport would not
// been set at that point.
//
// We run this in a goroutine because newTr.Close() calls onClose()
// inline, which requires locking ac.mu.
//
// The error we pass to Close() is immaterial since there are no open
// streams at this point, so no trailers with error details will be sent
// out. We just need to pass a non-nil error.
go newTr.Close(transport.ErrConnClosing)
return nil
}
ac.curAddr = addr
ac.transport = newTr
hcStarted = true
ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
return nil
case <-connClosed.Done():
// The transport has already closed. If we received the preface, too,
// this is not an error.
select {
case <-prefaceReceived.Done():
return nil
default:
return errors.New("connection closed before server preface received")
}
}
if hctx.Err() != nil {
// onClose was already called for this connection, but the connection
// was successfully established first. Consider it a success and set
// the new state to Idle.
ac.updateConnectivityState(connectivity.Idle, nil)
return nil
}
ac.curAddr = addr
ac.transport = newTr
ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
return nil
}
// startHealthCheck starts the health checking stream (RPC) to watch the health
@ -1402,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
if status.Code(err) == codes.Unimplemented {
channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
}
}
}()
@ -1583,7 +1557,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
} else {
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.Scheme)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
return rb, nil
@ -1604,39 +1578,26 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
return nil, err
}
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.Scheme)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb == nil {
return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
}
cc.parsedTarget = parsedTarget
return rb, nil
}
// parseTarget uses RFC 3986 semantics to parse the given target into a
// resolver.Target struct containing scheme, authority and endpoint. Query
// resolver.Target struct containing scheme, authority and url. Query
// params are stripped from the endpoint.
func parseTarget(target string) (resolver.Target, error) {
u, err := url.Parse(target)
if err != nil {
return resolver.Target{}, err
}
// For targets of the form "[scheme]://[authority]/endpoint, the endpoint
// value returned from url.Parse() contains a leading "/". Although this is
// in accordance with RFC 3986, we do not want to break existing resolver
// implementations which expect the endpoint without the leading "/". So, we
// end up stripping the leading "/" here. But this will result in an
// incorrect parsing for something like "unix:///path/to/socket". Since we
// own the "unix" resolver, we can workaround in the unix resolver by using
// the `URL` field instead of the `Endpoint` field.
endpoint := u.Path
if endpoint == "" {
endpoint = u.Opaque
}
endpoint = strings.TrimPrefix(endpoint, "/")
return resolver.Target{
Scheme: u.Scheme,
Authority: u.Host,
Endpoint: endpoint,
URL: *u,
}, nil
}

View File

@ -36,16 +36,16 @@ import (
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface {
// GetRequestMetadata gets the current request metadata, refreshing
// tokens if required. This should be called by the transport layer on
// each request, and the data should be populated in headers or other
// context. If a status code is returned, it will be used as the status
// for the RPC. uri is the URI of the entry point for the request.
// When supported by the underlying implementation, ctx can be used for
// timeout and cancellation. Additionally, RequestInfo data will be
// available via ctx to this call.
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
// it as an arbitrary string.
// GetRequestMetadata gets the current request metadata, refreshing tokens
// if required. This should be called by the transport layer on each
// request, and the data should be populated in headers or other
// context. If a status code is returned, it will be used as the status for
// the RPC (restricted to an allowable set of codes as defined by gRFC
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
// to this call. TODO(zhaoq): Define the set of the qualified keys instead
// of leaving it as an arbitrary string.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
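
An illustrative PerRPCCredentials implementation matching the interface described above (the staticTokenCreds type and its token handling are hypothetical, not part of this diff):

package example

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// staticTokenCreds attaches a fixed bearer token to every RPC.
type staticTokenCreds struct{ token string }

// GetRequestMetadata returns the headers to attach to the current request.
func (c staticTokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity reports that these credentials may only be sent
// over a secure (TLS) connection.
func (c staticTokenCreds) RequireTransportSecurity() bool { return true }

// Compile-time check that the interface is satisfied.
var _ credentials.PerRPCCredentials = staticTokenCreds{}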

View File

@ -23,9 +23,9 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
credinternal "google.golang.org/grpc/internal/credentials"
)
@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
b, err := ioutil.ReadFile(certFile)
b, err := os.ReadFile(certFile)
if err != nil {
return nil, err
}
@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -44,6 +44,7 @@ func init() {
extraDialOptions = nil
}
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@ -111,13 +112,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
}
}
type joinDialOption struct {
opts []DialOption
}
func (jdo *joinDialOption) apply(do *dialOptions) {
for _, opt := range jdo.opts {
opt.apply(do)
}
}
func newJoinDialOption(opts ...DialOption) DialOption {
return &joinDialOption{opts: opts}
}
// WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is
// 32KB.
//
// Zero will disable the write buffer such that each write will be on underlying
// connection. Note: A Send call may not directly translate to a write.
// Zero or negative values will disable the write buffer such that each write
// will be on underlying connection. Note: A Send call may not directly
// translate to a write.
func WithWriteBufferSize(s int) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.WriteBufferSize = s
@ -127,8 +143,9 @@ func WithWriteBufferSize(s int) DialOption {
// WithReadBufferSize lets you set the size of read buffer, this determines how
// much data can be read at most for each read syscall.
//
// The default value for this buffer is 32KB. Zero will disable read buffer for
// a connection so data framer can access the underlying conn directly.
// The default value for this buffer is 32KB. Zero or negative values will
// disable read buffer for a connection so data framer can access the
// underlying conn directly.
func WithReadBufferSize(s int) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.ReadBufferSize = s
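
As a point of reference, a minimal client sketch applying these buffer options (the target address and the use of insecure credentials are placeholder choices, not part of this diff):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// With this change, zero or negative sizes disable the buffers, so each
	// write and read goes directly to the underlying connection.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithWriteBufferSize(0),
		grpc.WithReadBufferSize(0),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
}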

View File

@ -19,7 +19,7 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -28,6 +28,8 @@ package encoding
import (
"io"
"strings"
"google.golang.org/grpc/internal/grpcutil"
)
// Identity specifies the optional encoding for uncompressed streams.
@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor)
// registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
registeredCompressor[c.Name()] = c
if !grpcutil.IsCompressorNameRegistered(c.Name()) {
grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
}
}
// GetCompressor returns Compressor for the given compressor name.

View File

@ -22,7 +22,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strconv"
@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config)
// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
errorW := ioutil.Discard
warningW := ioutil.Discard
infoW := ioutil.Discard
errorW := io.Discard
warningW := io.Discard
infoW := io.Discard
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
switch logLevel {
@ -242,7 +241,7 @@ func (g *loggerT) V(l int) bool {
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -30,15 +30,15 @@ import (
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.

View File

@ -26,7 +26,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
// Build is an internal only method for building the proto message out of the
// input event. It's made public to enable other library to reuse as much logic
// in TruncatingMethodLogger as possible.
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
m := c.toProto()
timestamp, _ := ptypes.TimestampProto(time.Now())
m.Timestamp = timestamp
@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
switch pay := m.Payload.(type) {
case *pb.GrpcLogEntry_ClientHeader:
case *binlogpb.GrpcLogEntry_ClientHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
case *pb.GrpcLogEntry_ServerHeader:
case *binlogpb.GrpcLogEntry_ServerHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
case *pb.GrpcLogEntry_Message:
case *binlogpb.GrpcLogEntry_Message:
m.PayloadTruncated = ml.truncateMessage(pay.Message)
}
return m
@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
if ml.headerMaxLen == maxUInt {
return false
}
@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
// but not counted towards the size limit.
continue
}
currentEntryLen := uint64(len(entry.Value))
currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
if currentEntryLen > bytesLimit {
break
}
@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
return truncated
}
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
if ml.messageMaxLen == maxUInt {
return false
}
@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated
// LogEntryConfig represents the configuration for binary log entry.
type LogEntryConfig interface {
toProto() *pb.GrpcLogEntry
toProto() *binlogpb.GrpcLogEntry
}
// ClientHeader configs the binary log entry to be a ClientHeader entry.
@ -159,10 +159,10 @@ type ClientHeader struct {
PeerAddr net.Addr
}
func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
// This function doesn't need to set all the fields (e.g. seq ID). The Log
// function will set the fields when necessary.
clientHeader := &pb.ClientHeader{
clientHeader := &binlogpb.ClientHeader{
Metadata: mdToMetadataProto(c.Header),
MethodName: c.MethodName,
Authority: c.Authority,
@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
if c.Timeout > 0 {
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
Payload: &pb.GrpcLogEntry_ClientHeader{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
Payload: &binlogpb.GrpcLogEntry_ClientHeader{
ClientHeader: clientHeader,
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@ -195,19 +195,19 @@ type ServerHeader struct {
PeerAddr net.Addr
}
func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
Payload: &pb.GrpcLogEntry_ServerHeader{
ServerHeader: &pb.ServerHeader{
func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
Payload: &binlogpb.GrpcLogEntry_ServerHeader{
ServerHeader: &binlogpb.ServerHeader{
Metadata: mdToMetadataProto(c.Header),
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@ -223,7 +223,7 @@ type ClientMessage struct {
Message interface{}
}
func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
var (
data []byte
err error
@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
} else {
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
Payload: &pb.GrpcLogEntry_Message{
Message: &pb.Message{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
Payload: &binlogpb.GrpcLogEntry_Message{
Message: &binlogpb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -263,7 +263,7 @@ type ServerMessage struct {
Message interface{}
}
func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
var (
data []byte
err error
@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
} else {
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
Payload: &pb.GrpcLogEntry_Message{
Message: &pb.Message{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
Payload: &binlogpb.GrpcLogEntry_Message{
Message: &binlogpb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -300,15 +300,15 @@ type ClientHalfClose struct {
OnClientSide bool
}
func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
Payload: nil, // No payload here.
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -324,7 +324,7 @@ type ServerTrailer struct {
PeerAddr net.Addr
}
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
Payload: &pb.GrpcLogEntry_Trailer{
Trailer: &pb.Trailer{
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
Payload: &binlogpb.GrpcLogEntry_Trailer{
Trailer: &binlogpb.Trailer{
Metadata: mdToMetadataProto(c.Trailer),
StatusCode: uint32(st.Code()),
StatusMessage: st.Message(),
@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@ -367,15 +367,15 @@ type Cancel struct {
OnClientSide bool
}
func (c *Cancel) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
Payload: nil,
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool {
return strings.HasPrefix(key, "grpc-")
}
func mdToMetadataProto(md metadata.MD) *pb.Metadata {
ret := &pb.Metadata{}
func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
ret := &binlogpb.Metadata{}
for k, vv := range md {
if metadataKeyOmit(k) {
continue
}
for _, v := range vv {
ret.Entry = append(ret.Entry,
&pb.MetadataEntry{
&binlogpb.MetadataEntry{
Key: k,
Value: []byte(v),
},
@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
return ret
}
func addrToProto(addr net.Addr) *pb.Address {
ret := &pb.Address{}
func addrToProto(addr net.Addr) *binlogpb.Address {
ret := &binlogpb.Address{}
switch a := addr.(type) {
case *net.TCPAddr:
if a.IP.To4() != nil {
ret.Type = pb.Address_TYPE_IPV4
ret.Type = binlogpb.Address_TYPE_IPV4
} else if a.IP.To16() != nil {
ret.Type = pb.Address_TYPE_IPV6
ret.Type = binlogpb.Address_TYPE_IPV6
} else {
ret.Type = pb.Address_TYPE_UNKNOWN
ret.Type = binlogpb.Address_TYPE_UNKNOWN
// Do not set address and port fields.
break
}
ret.Address = a.IP.String()
ret.IpPort = uint32(a.Port)
case *net.UnixAddr:
ret.Type = pb.Address_TYPE_UNIX
ret.Type = binlogpb.Address_TYPE_UNIX
ret.Address = a.String()
default:
ret.Type = pb.Address_TYPE_UNKNOWN
ret.Type = binlogpb.Address_TYPE_UNKNOWN
}
return ret
}
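For context, the switch above maps a net.Addr onto the binary-log Address type: IPv4 when IP.To4() is non-nil, IPv6 when only To16() succeeds, unix for *net.UnixAddr, and unknown otherwise. Below is a small standalone sketch of that classification (hypothetical helper name, plain strings instead of the binlogpb enum values), assuming the same To4/To16 rules:

package main

import (
	"fmt"
	"net"
)

// addrType mirrors the classification in addrToProto above, returning a plain
// string instead of a binlogpb.Address_Type value.
func addrType(addr net.Addr) string {
	switch a := addr.(type) {
	case *net.TCPAddr:
		if a.IP.To4() != nil {
			return "TYPE_IPV4"
		} else if a.IP.To16() != nil {
			return "TYPE_IPV6"
		}
		return "TYPE_UNKNOWN" // address and port are left unset in this case
	case *net.UnixAddr:
		return "TYPE_UNIX"
	default:
		return "TYPE_UNKNOWN"
	}
}

func main() {
	fmt.Println(addrType(&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 50051})) // TYPE_IPV4
	fmt.Println(addrType(&net.TCPAddr{IP: net.ParseIP("::1"), Port: 50051}))       // TYPE_IPV6
	fmt.Println(addrType(&net.UnixAddr{Name: "/tmp/grpc.sock", Net: "unix"}))      // TYPE_UNIX
}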

View File

@ -26,7 +26,7 @@ import (
"time"
"github.com/golang/protobuf/proto"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)
var (
@ -42,15 +42,15 @@ type Sink interface {
// Write will be called to write the log entry into the sink.
//
// It should be thread-safe so it can be called in parallel.
Write(*pb.GrpcLogEntry) error
Write(*binlogpb.GrpcLogEntry) error
// Close will be called when the Sink is replaced by a new Sink.
Close() error
}
type noopSink struct{}
func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }
func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }
// newWriterSink creates a binary log sink with the given writer.
//
@ -66,7 +66,7 @@ type writerSink struct {
out io.Writer
}
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
@ -96,7 +96,7 @@ type bufferedSink struct {
done chan struct{}
}
func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
fs.mu.Lock()
defer fs.mu.Unlock()
if !fs.flusherStarted {

View File

@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) {
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
// parent's child list.
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
// will return entry not found error.
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
// parent's child list.
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
// will return entry not found error.
func (c *channel) deleteSelfIfReady() {
if !c.deleteSelfFromTree() {
return
@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) {
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
// its parent's child list.
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
// by id will return entry not found error.
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
// its parent's child list.
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
// by id will return entry not found error.
func (sc *subChannel) deleteSelfIfReady() {
if !sc.deleteSelfFromTree() {
return

View File

@ -21,15 +21,42 @@ package envconfig
import (
"os"
"strconv"
"strings"
)
const (
prefix = "GRPC_GO_"
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
)
var (
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
// AdvertiseCompressors is set if registered compressor should be advertised
// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
// RingHashCap indicates the maximum ring size which defaults to 4096
// entries but may be overridden by setting the environment variable
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
)
func boolFromEnv(envVar string, def bool) bool {
if def {
// The default is true; return true unless the variable is "false".
return !strings.EqualFold(os.Getenv(envVar), "false")
}
// The default is false; return false unless the variable is "true".
return strings.EqualFold(os.Getenv(envVar), "true")
}
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
if err != nil {
return def
}
if v < min {
return min
}
if v > max {
return max
}
return v
}
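The two helpers above centralize how gRPC-Go reads its environment knobs: with a true default the variable can only switch the flag off by being "false" (case-insensitive), with a false default only "true" switches it on, and numeric values are clamped to [min, max] with the default used when parsing fails. A standalone sketch of those semantics (names here are illustrative; the real helpers are unexported in the internal envconfig package):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// boolFromEnv mirrors the helper above: with def=true, only an explicit
// "false" flips the flag; with def=false, only an explicit "true" enables it.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

// uint64FromEnv mirrors the clamping behavior: unparsable input falls back to
// def, and parsed values are clamped into [min, max].
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

func main() {
	os.Setenv("GRPC_RING_HASH_CAP", "10000000")                            // above the 8M cap
	fmt.Println(uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)) // 8388608 (clamped)
	fmt.Println(boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true))            // true unless set to "false"
	fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false))    // false unless set to "true"
}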

View File

@ -20,7 +20,6 @@ package envconfig
import (
"os"
"strings"
)
const (
@ -36,16 +35,6 @@ const (
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
)
var (
@ -64,38 +53,40 @@ var (
// XDSRingHash indicates whether ring hash support is enabled, which can be
// disabled by setting the environment variable
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
// XDSClientSideSecurity is used to control processing of security
// configuration on the client-side.
//
// Note that there is no env var protection for the server-side because we
// have a brand new API on the server-side and users explicitly need to use
// the new API to get security integration on the server.
XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
// XDSAggregateAndDNS indicates whether processing of aggregated cluster
// and DNS cluster is enabled, which can be enabled by setting the
// environment variable
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
// "true".
XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
// which can be disabled by setting the environment variable
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
// XDSOutlierDetection indicates whether outlier detection support is
// enabled, which can be disabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false")
// XDSFederation indicates whether federation support is enabled.
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
// XDSFederation indicates whether federation support is enabled, which can
// be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)
// XDSRLS indicates whether processing of Cluster Specifier plugins and
// support for the RLS Cluster Specifier is enabled, which can be enabled by
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
// "true".
XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
)

View File

@ -110,7 +110,7 @@ type LoggerV2 interface {
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -0,0 +1,32 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcsync
import (
"sync"
)
// OnceFunc returns a function wrapping f which ensures f is only executed
// once even if the returned function is executed multiple times.
func OnceFunc(f func()) func() {
var once sync.Once
return func() {
once.Do(f)
}
}
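OnceFunc is a thin wrapper over sync.Once: the returned closure runs f at most once no matter how many times it is invoked. A quick usage sketch (assuming the helper behaves exactly as written above; the wrapper here is a local copy since grpcsync is internal):

package main

import (
	"fmt"
	"sync"
)

// onceFunc reproduces grpcsync.OnceFunc for illustration: the returned closure
// executes f only on its first invocation.
func onceFunc(f func()) func() {
	var once sync.Once
	return func() {
		once.Do(f)
	}
}

func main() {
	closeConn := onceFunc(func() { fmt.Println("connection closed") })
	closeConn() // prints "connection closed"
	closeConn() // no-op on every later call
}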

View File

@ -0,0 +1,47 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"strings"
"google.golang.org/grpc/internal/envconfig"
)
// RegisteredCompressorNames holds names of the registered compressors.
var RegisteredCompressorNames []string
// IsCompressorNameRegistered returns true when name is available in registry.
func IsCompressorNameRegistered(name string) bool {
for _, compressor := range RegisteredCompressorNames {
if compressor == name {
return true
}
}
return false
}
// RegisteredCompressors returns a string of registered compressor names
// separated by comma.
func RegisteredCompressors() string {
if !envconfig.AdvertiseCompressors {
return ""
}
return strings.Join(RegisteredCompressorNames, ",")
}
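The registry above is what the client transport later joins into the grpc-accept-encoding header (see the createHeaderFields change further down), gated by the GRPC_GO_ADVERTISE_COMPRESSORS flag. A standalone sketch of that lookup and join, using a hypothetical local slice in place of RegisteredCompressorNames:

package main

import (
	"fmt"
	"strings"
)

// registered stands in for grpcutil.RegisteredCompressorNames.
var registered = []string{"gzip", "snappy"}

// isRegistered mirrors IsCompressorNameRegistered: a linear scan of the names.
func isRegistered(name string) bool {
	for _, c := range registered {
		if c == name {
			return true
		}
	}
	return false
}

func main() {
	// The comma-joined list is what gets advertised in grpc-accept-encoding
	// when compressor advertising is enabled.
	fmt.Println(strings.Join(registered, ",")) // "gzip,snappy"
	fmt.Println(isRegistered("gzip"))          // true
	fmt.Println(isRegistered("zstd"))          // false
}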

View File

@ -25,7 +25,6 @@ import (
// ParseMethod splits service and method from the input. It expects format
// "/service/method".
//
func ParseMethod(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")

View File

@ -77,6 +77,9 @@ var (
// ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking.
ClearGlobalDialOptions func()
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
// JoinServerOptions combines the server options passed as arguments into a
// single server option.
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption

View File

@ -116,7 +116,7 @@ type dnsBuilder struct{}
// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
host, port, err := parseTarget(target.Endpoint, defaultPort)
host, port, err := parseTarget(target.Endpoint(), defaultPort)
if err != nil {
return nil, err
}
@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
disableServiceConfig: opts.DisableServiceConfig,
}
if target.Authority == "" {
if target.URL.Host == "" {
d.resolver = defaultResolver
} else {
d.resolver, err = customAuthorityResolver(target.Authority)
d.resolver, err = customAuthorityResolver(target.URL.Host)
if err != nil {
return nil, err
}

View File

@ -20,13 +20,20 @@
// name without scheme back to gRPC as resolved address.
package passthrough
import "google.golang.org/grpc/resolver"
import (
"errors"
"google.golang.org/grpc/resolver"
)
const scheme = "passthrough"
type passthroughBuilder struct{}
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
if target.Endpoint() == "" && opts.Dialer == nil {
return nil, errors.New("passthrough: received empty target in Build()")
}
r := &passthroughResolver{
target: target,
cc: cc,
@ -45,7 +52,7 @@ type passthroughResolver struct {
}
func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}

View File

@ -34,8 +34,8 @@ type builder struct {
}
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
if target.Authority != "" {
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
if target.URL.Host != "" {
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
}
// gRPC was parsing the dial target manually before PR #4817, and we

View File

@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
// config. This method iterates through that list in order, and stops at the
// first policy that is supported.
// - If the config for the first supported policy is invalid, the whole service
// config is invalid.
// - If the list doesn't contain any supported policy, the whole service config
// is invalid.
// - If the config for the first supported policy is invalid, the whole service
// config is invalid.
// - If the list doesn't contain any supported policy, the whole service config
// is invalid.
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
var ir intermediateBalancerConfig
err := json.Unmarshal(b, &ir)

View File

@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool {
}
return proto.Equal(e.s.s, tse.s.s)
}
// IsRestrictedControlPlaneCode returns whether the status includes a code
// restricted for control plane usage as defined by gRFC A54.
func IsRestrictedControlPlaneCode(s *Status) bool {
switch s.Code() {
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
return true
}
return false
}
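Per gRFC A54, control-plane paths (per-RPC credentials, pickers, and the like) must not surface the codes listed above to the application; the transport changes later in this diff rewrite such errors to codes.Internal. A rough sketch of that check, using the public status/codes packages and a local copy of the restricted-code list (the internal IsRestrictedControlPlaneCode is not importable from outside grpc):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isRestricted is a local copy of the gRFC A54 code list checked by
// IsRestrictedControlPlaneCode above.
func isRestricted(c codes.Code) bool {
	switch c {
	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
		codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
		return true
	}
	return false
}

func main() {
	err := status.Error(codes.NotFound, "per-RPC creds failure")
	if st, ok := status.FromError(err); ok && isRestricted(st.Code()) {
		// Mirror the transport behavior: rewrite restricted codes to Internal.
		err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
	}
	fmt.Println(err) // rpc error: code = Internal desc = ...
}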

View File

@ -191,7 +191,7 @@ type goAway struct {
code http2.ErrCode
debugData []byte
headsUp bool
closeConn bool
closeConn error // if set, loopyWriter will exit, resulting in conn closure
}
func (*goAway) isTransportResponseFrame() bool { return false }
@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct {
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
// closeConnection is an instruction to tell the loopy writer to flush the
// framer and exit, which will cause the transport's connection to be closed
// (by the client or server). The transport itself will close after the reader
// encounters the EOF caused by the connection closure.
type closeConnection struct{}
func (closeConnection) isTransportResponseFrame() bool { return false }
type outStreamState int
const (
@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
select {
case <-c.ch:
case <-c.done:
return nil, ErrConnClosing
return nil, errors.New("transport closed by client")
}
}
}
@ -519,18 +527,9 @@ const minBatchSize = 1000
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
// if the batch size is too low to give stream goroutines a chance to fill it up.
func (l *loopyWriter) run() (err error) {
defer func() {
if err == ErrConnClosing {
// Don't log ErrConnClosing as error since it happens
// 1. When the connection is closed by some other known issue.
// 2. User closed the connection.
// 3. A graceful close of connection.
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter.run returning. %v", err)
}
err = nil
}
}()
// Always flush the writer before exiting in case there are pending frames
// to be sent.
defer l.framer.writer.Flush()
for {
it, err := l.cbuf.get(true)
if err != nil {
@ -574,7 +573,6 @@ func (l *loopyWriter) run() (err error) {
}
l.framer.writer.Flush()
break hasdata
}
}
}
@ -655,19 +653,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
itl: &itemList{},
wq: h.wq,
}
str.itl.enqueue(h)
return l.originateStream(str)
return l.originateStream(str, h)
}
func (l *loopyWriter) originateStream(str *outStream) error {
hdr := str.itl.dequeue().(*headerFrame)
if err := hdr.initStream(str.id); err != nil {
if err == ErrConnClosing {
return err
}
// Other errors(errStreamDrain) need not close transport.
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
// l.draining is set when handling GoAway, in which case we want to avoid
// creating new streams.
if l.draining {
// TODO: provide a better error with the reason we are in draining.
hdr.onOrphaned(errStreamDrain)
return nil
}
if err := hdr.initStream(str.id); err != nil {
return err
}
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
return err
}
@ -763,8 +762,8 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
return err
}
}
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
return ErrConnClosing
if l.draining && len(l.estdStreams) == 0 {
return errors.New("finished processing active streams while in draining mode")
}
return nil
}
@ -799,7 +798,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
if l.side == clientSide {
l.draining = true
if len(l.estdStreams) == 0 {
return ErrConnClosing
return errors.New("received GOAWAY with no active streams")
}
}
return nil
@ -817,6 +816,13 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
return nil
}
func (l *loopyWriter) closeConnectionHandler() error {
// Exit loopyWriter entirely by returning an error here. This will lead to
// the transport closing the connection, and, ultimately, transport
// closure.
return ErrConnClosing
}
func (l *loopyWriter) handle(i interface{}) error {
switch i := i.(type) {
case *incomingWindowUpdate:
@ -845,6 +851,8 @@ func (l *loopyWriter) handle(i interface{}) error {
return l.goAwayHandler(i)
case *outFlowControlSizeRequest:
return l.outFlowControlSizeRequestHandler(i)
case closeConnection:
return l.closeConnectionHandler()
default:
return fmt.Errorf("transport: unknown control message type %T", i)
}

View File

@ -47,3 +47,9 @@ const (
defaultClientMaxHeaderListSize = uint32(16 << 20)
defaultServerMaxHeaderListSize = uint32(16 << 20)
)
// MaxStreamID is the upper bound for the stream ID before the current
// transport gracefully closes and a new transport is created for subsequent RPCs.
// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
// integer. It's exported so that tests can override it.
var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
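MaxStreamID works out to 1610612735 (75% of the 31-bit stream ID space); client stream IDs are odd and advance by 2, and once nextID passes this bound the transport drains itself via GracefulClose so a fresh connection takes over for later RPCs. A small arithmetic sketch of that threshold check (standalone, illustrative only):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Same expression as the MaxStreamID constant above.
	maxStreamID := uint32(math.MaxInt32 * 3 / 4)
	fmt.Println(maxStreamID) // 1610612735

	// Client-initiated stream IDs are odd and increase by 2 per stream; once
	// the next ID crosses the bound, the transport is drained.
	nextID := maxStreamID + 2
	fmt.Println(nextID > maxStreamID) // true -> GracefulClose on this transport
}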

View File

@ -46,24 +46,32 @@ import (
"google.golang.org/grpc/status"
)
// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
if r.ProtoMajor != 2 {
return nil, errors.New("gRPC requires HTTP/2")
msg := "gRPC requires HTTP/2"
http.Error(w, msg, http.StatusBadRequest)
return nil, errors.New(msg)
}
if r.Method != "POST" {
return nil, errors.New("invalid gRPC request method")
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
http.Error(w, msg, http.StatusBadRequest)
return nil, errors.New(msg)
}
contentType := r.Header.Get("Content-Type")
// TODO: do we assume contentType is lowercase? we did before
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
if !validContentType {
return nil, errors.New("invalid gRPC request content-type")
msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
http.Error(w, msg, http.StatusUnsupportedMediaType)
return nil, errors.New(msg)
}
if _, ok := w.(http.Flusher); !ok {
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
msg := "gRPC requires a ResponseWriter supporting http.Flusher"
http.Error(w, msg, http.StatusInternalServerError)
return nil, errors.New(msg)
}
st := &serverHandlerTransport{
@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
if v := r.Header.Get("grpc-timeout"); v != "" {
to, err := decodeTimeout(v)
if err != nil {
return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
http.Error(w, msg, http.StatusBadRequest)
return nil, status.Error(codes.Internal, msg)
}
st.timeoutSet = true
st.timeout = to
@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
for _, v := range vv {
v, err := decodeMetadataHeader(k, v)
if err != nil {
return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
http.Error(w, msg, http.StatusBadRequest)
return nil, status.Error(codes.Internal, msg)
}
metakv = append(metakv, k, v)
}
@ -141,12 +153,15 @@ type serverHandlerTransport struct {
stats []stats.Handler
}
func (ht *serverHandlerTransport) Close() {
ht.closeOnce.Do(ht.closeCloseChanOnce)
func (ht *serverHandlerTransport) Close(err error) {
ht.closeOnce.Do(func() {
if logger.V(logLevel) {
logger.Infof("Closing serverHandlerTransport: %v", err)
}
close(ht.closedCh)
})
}
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
})
}
}
ht.Close()
ht.Close(errors.New("finished writing status"))
return err
}
@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
case <-ht.req.Context().Done():
}
cancel()
ht.Close()
ht.Close(errors.New("request is done processing"))
}()
req := ht.req
@ -442,10 +457,10 @@ func (ht *serverHandlerTransport) Drain() {
// mapRecvMsgError returns the non-nil err into the appropriate
// error value as expected by callers of *grpc.parser.recvMsg.
// In particular, it can only be:
// * io.EOF
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
// * an error from the status package
// - io.EOF
// - io.ErrUnexpectedEOF
// - of type transport.ConnectionError
// - an error from the status package
func mapRecvMsgError(err error) error {
if err == io.EOF || err == io.ErrUnexpectedEOF {
return err

View File

@ -38,8 +38,10 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
icredentials "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
@ -57,11 +59,15 @@ var clientConnectionCounter uint64
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
ctx context.Context
cancel context.CancelFunc
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
userAgent string
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
ctx context.Context
cancel context.CancelFunc
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
userAgent string
// address contains the resolver returned address for this transport.
// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
// passed to `NewStream`, when determining the :authority header.
address resolver.Address
md metadata.MD
conn net.Conn // underlying communication channel
loopy *loopyWriter
@ -99,16 +105,13 @@ type http2Client struct {
maxSendHeaderListSize *uint32
bdpEst *bdpEstimator
// onPrefaceReceipt is a callback that client transport calls upon
// receiving the server preface to signal that a successful HTTP2
// connection was established.
onPrefaceReceipt func()
maxConcurrentStreams uint32
streamQuota int64
streamsQuotaAvailable chan struct{}
waitingStreams uint32
nextID uint32
registeredCompressors string
// Do not access controlBuf with mu held.
mu sync.Mutex // guard the following variables
@ -137,8 +140,7 @@ type http2Client struct {
channelzID *channelz.Identifier
czData *channelzData
onGoAway func(GoAwayReason)
onClose func()
onClose func(GoAwayReason)
bufferPool *bufferPool
@ -194,7 +196,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@ -214,14 +216,40 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
}
return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
}
// Any further errors will close the underlying connection
defer func(conn net.Conn) {
if err != nil {
conn.Close()
}
}(conn)
// The following defer and goroutine monitor the connectCtx for cancelation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
// monitored (and to prevent the connection from ever being closed) after
// returning from this function.
ctxMonitorDone := grpcsync.NewEvent()
newClientCtx, newClientDone := context.WithCancel(connectCtx)
defer func() {
newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
<-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
}()
go func(conn net.Conn) {
defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
<-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
if err := connectCtx.Err(); err != nil {
// connectCtx expired before exiting the function. Hard close the connection.
if logger.V(logLevel) {
logger.Infof("newClientTransport: aborting due to connectCtx: %v", err)
}
conn.Close()
}
}(conn)
kp := opts.KeepaliveParams
// Validate keepalive parameters.
if kp.Time == 0 {
@ -253,15 +281,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}
if transportCreds != nil {
rawConn := conn
// Pull the deadline from the connectCtx, which will be used for
// timeouts in the authentication protocol handshake. Can ignore the
// boolean as the deadline will return the zero value, which will make
// the conn not timeout on I/O operations.
deadline, _ := connectCtx.Deadline()
rawConn.SetDeadline(deadline)
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
rawConn.SetDeadline(time.Time{})
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
@ -299,6 +319,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
ctxDone: ctx.Done(), // Cache Done chan.
cancel: cancel,
userAgent: opts.UserAgent,
registeredCompressors: grpcutil.RegisteredCompressors(),
address: addr,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@ -315,16 +337,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
kp: kp,
statsHandlers: opts.StatsHandlers,
initialWindowSize: initialWindowSize,
onPrefaceReceipt: onPrefaceReceipt,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
czData: new(channelzData),
onGoAway: onGoAway,
onClose: onClose,
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
onClose: onClose,
}
// Add peer information to the http2client context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
@ -363,21 +383,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
t.kpDormancyCond = sync.NewCond(&t.mu)
go t.keepalive()
}
// Start the reader goroutine for incoming message. Each transport has
// a dedicated goroutine which reads HTTP2 frame from network. Then it
// dispatches the frame to the corresponding stream entity.
go t.reader()
// Start the reader goroutine for incoming messages. Each transport has a
// dedicated goroutine which reads HTTP2 frames from the network. Then it
// dispatches the frame to the corresponding stream entity. When the
// server preface is received, readerErrCh is closed. If an error occurs
// first, an error is pushed to the channel. This must be checked before
// returning from this function.
readerErrCh := make(chan error, 1)
go t.reader(readerErrCh)
defer func() {
if err == nil {
err = <-readerErrCh
}
if err != nil {
t.Close(err)
}
}()
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
t.Close(err)
return nil, err
}
if n != len(clientPreface) {
err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
t.Close(err)
return nil, err
}
var ss []http2.Setting
@ -397,14 +428,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
err = t.framer.fr.WriteSettings(ss...)
if err != nil {
err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
t.Close(err)
return nil, err
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
t.Close(err)
return nil, err
}
}
@ -417,10 +446,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
err := t.loopy.run()
if err != nil {
if logger.V(logLevel) {
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
}
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
}
// Do not close the transport. Let reader goroutine handle it since
// there might be data in the buffers.
@ -507,9 +534,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
}
registeredCompressors := t.registeredCompressors
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
// Include the outgoing compressor name when compressor is not registered
// via encoding.RegisterCompressor. This is possible when client uses
// WithCompressor dial option.
if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
if registeredCompressors != "" {
registeredCompressors += ","
}
registeredCompressors += callHdr.SendCompress
}
}
if registeredCompressors != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
}
if dl, ok := ctx.Deadline(); ok {
// Send out the timeout regardless of its value. The server can detect a timeout context by itself.
@ -589,7 +629,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
for _, c := range t.perRPCCreds {
data, err := c.GetRequestMetadata(ctx, audience)
if err != nil {
if _, ok := status.FromError(err); ok {
if st, ok := status.FromError(err); ok {
// Restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
}
return nil, err
}
@ -618,7 +662,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
}
data, err := callCreds.GetRequestMetadata(ctx, audience)
if err != nil {
return nil, status.Errorf(codes.Internal, "transport: %v", err)
if st, ok := status.FromError(err); ok {
// Restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
}
return nil, err
}
return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
}
callAuthData = make(map[string]string, len(data))
for k, v := range data {
@ -634,13 +685,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
// NewStream errors result in transparent retry, as they mean nothing went onto
// the wire. However, there are two notable exceptions:
//
// 1. If the stream headers violate the max header list size allowed by the
// server. It's possible this could succeed on another transport, even if
// it's unlikely, but do not transparently retry.
// 2. If the credentials errored when requesting their headers. In this case,
// it's possible a retry can fix the problem, but indefinitely transparently
// retrying is not appropriate as it is likely the credentials, if they can
// eventually succeed, would need I/O to do so.
// 1. If the stream headers violate the max header list size allowed by the
// server. It's possible this could succeed on another transport, even if
// it's unlikely, but do not transparently retry.
// 2. If the credentials errored when requesting their headers. In this case,
// it's possible a retry can fix the problem, but indefinitely transparently
// retrying is not appropriate as it is likely the credentials, if they can
// eventually succeed, would need I/O to do so.
type NewStreamError struct {
Err error
@ -655,6 +706,18 @@ func (e NewStreamError) Error() string {
// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
ctx = peer.NewContext(ctx, t.getPeer())
// ServerName field of the resolver returned address takes precedence over
// Host field of CallHdr to determine the :authority header. This is because
// the ServerName field takes precedence for server authentication during
// TLS handshake, and the :authority header should match the value used
// for server authentication.
if t.address.ServerName != "" {
newCallHdr := *callHdr
newCallHdr.Host = t.address.ServerName
callHdr = &newCallHdr
}
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
@ -679,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
endStream: false,
initStream: func(id uint32) error {
t.mu.Lock()
if state := t.state; state != reachable {
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
if t.state == closing {
t.mu.Unlock()
// Do a quick cleanup.
err := error(errStreamDrain)
if state == closing {
err = ErrConnClosing
}
cleanup(err)
return err
cleanup(ErrConnClosing)
return ErrConnClosing
}
if channelz.IsOn() {
atomic.AddInt64(&t.czData.streamsStarted, 1)
@ -705,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
}
firstTry := true
var ch chan struct{}
transportDrainRequired := false
checkForStreamQuota := func(it interface{}) bool {
if t.streamQuota <= 0 { // Can go negative if server decreases it.
if firstTry {
@ -720,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
h := it.(*headerFrame)
h.streamID = t.nextID
t.nextID += 2
// Drain client transport if nextID > MaxStreamID which signals gRPC that
// the connection is closed and a new one must be created for subsequent RPCs.
transportDrainRequired = t.nextID > MaxStreamID
s.id = h.streamID
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
t.mu.Lock()
@ -799,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
sh.HandleRPC(s.ctx, outHeader)
}
}
if transportDrainRequired {
if logger.V(logLevel) {
logger.Infof("transport: t.nextID > MaxStreamID. Draining")
}
t.GracefulClose()
}
return s, nil
}
@ -880,20 +952,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
//
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
func (t *http2Client) Close(err error) {
t.mu.Lock()
// Make sure we only Close once.
// Make sure we only close once.
if t.state == closing {
t.mu.Unlock()
return
}
// Call t.onClose before setting the state to closing to prevent the client
// from attempting to create new streams ASAP.
t.onClose()
if logger.V(logLevel) {
logger.Infof("transport: closing: %v", err)
}
// Call t.onClose ASAP to prevent the client from attempting to create new
// streams.
if t.state != draining {
t.onClose(GoAwayInvalid)
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
@ -943,11 +1016,15 @@ func (t *http2Client) GracefulClose() {
t.mu.Unlock()
return
}
if logger.V(logLevel) {
logger.Infof("transport: GracefulClose called")
}
t.onClose(GoAwayInvalid)
t.state = draining
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
t.Close(ErrConnClosing)
t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
return
}
t.controlBuf.put(&incomingGoAway{})
@ -1105,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
if logger.V(logLevel) {
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
}
statusCode = codes.Unknown
}
@ -1223,8 +1300,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// Notify the clientconn about the GOAWAY before we set the state to
// draining, to allow the client to stop attempting to create streams
// before disallowing new streams on this connection.
t.onGoAway(t.goAwayReason)
t.state = draining
if t.state != draining {
t.onClose(t.goAwayReason)
t.state = draining
}
}
// All streams with IDs greater than the GoAwayId
// and smaller than the previous GoAway ID should be killed.
@ -1482,33 +1561,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
}
// reader runs as a separate goroutine in charge of reading data from network
// connection.
//
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
// optimal.
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
defer close(t.readerDone)
// Check the validity of server preface.
// readServerPreface reads and handles the initial settings frame from the
// server.
func (t *http2Client) readServerPreface() error {
frame, err := t.framer.fr.ReadFrame()
if err != nil {
err = connectionErrorf(true, err, "error reading server preface: %v", err)
t.Close(err) // this kicks off resetTransport, so must be last before return
return
}
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
return connectionErrorf(true, err, "error reading server preface: %v", err)
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
// this kicks off resetTransport, so must be last before return
t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
}
t.handleSettings(sf, true)
return nil
}
// reader verifies the server preface and reads all subsequent data from
// network connection. If the server preface is not read successfully, an
// error is pushed to errCh; otherwise errCh is closed with no error.
func (t *http2Client) reader(errCh chan<- error) {
defer close(t.readerDone)
if err := t.readServerPreface(); err != nil {
errCh <- err
return
}
t.onPrefaceReceipt()
t.handleSettings(sf, true)
close(errCh)
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
// loop to keep reading incoming messages on this transport.
for {
@ -1711,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
return -2
}
}
func (t *http2Client) stateForTesting() transportState {
t.mu.Lock()
defer t.mu.Unlock()
return t.state
}
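The reader rewrite above replaces the onPrefaceReceipt callback with a buffered error channel: the reader goroutine pushes the preface error (if any) or closes the channel, and newHTTP2Client's deferred check decides whether to close the new transport. A simplified standalone sketch of that handshake pattern (hypothetical names, no real HTTP/2 framing):

package main

import (
	"errors"
	"fmt"
)

// startReader models t.reader(errCh): push an error if the server preface
// cannot be read, otherwise close the channel and keep reading.
func startReader(prefaceOK bool, errCh chan<- error) {
	go func() {
		if !prefaceOK {
			errCh <- errors.New("error reading server preface")
			return
		}
		close(errCh)
		// ... frame-reading loop would continue here ...
	}()
}

func main() {
	errCh := make(chan error, 1)
	startReader(true, errCh)
	// Receiving from the closed channel yields nil, signalling a good preface.
	if err := <-errCh; err != nil {
		fmt.Println("transport creation failed:", err)
		return
	}
	fmt.Println("server preface received; transport usable")
}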

View File

@ -21,6 +21,7 @@ package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
@ -41,6 +42,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -101,13 +103,13 @@ type http2Server struct {
mu sync.Mutex // guard the following
// drainChan is initialized when Drain() is called the first time.
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
// Then an independent goroutine will be launched to later send the second GoAway.
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
// Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
// already underway.
drainChan chan struct{}
// drainEvent is initialized when Drain() is called the first time. After
// which the server writes out the first GoAway(with ID 2^31-1) frame. Then
// an independent goroutine will be launched to later send the second
// GoAway. During this time we don't want to write another first GoAway(with
// ID 2^31-1) frame. Thus a call to Drain() will be a no-op if drainEvent is
// already initialized since draining is already underway.
drainEvent *grpcsync.Event
state transportState
activeStreams map[uint32]*Stream
// idle is the time instant when the connection went idle.
@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
defer func() {
if err != nil {
t.Close()
t.Close(err)
}
}()
@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
if err := t.loopy.run(); err != nil {
if logger.V(logLevel) {
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
}
err := t.loopy.run()
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
}
t.conn.Close()
t.controlBuf.finish()
@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
return t, nil
}
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
// operateHeaders takes action on the decoded headers. Returns an error if a fatal
// error is encountered and the transport needs to close; otherwise returns nil.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rstCode: http2.ErrCodeFrameSize,
onWrite: func() {},
})
return false
return nil
}
if streamID%2 != 1 || streamID <= t.maxStreamID {
// illegal gRPC stream id.
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
}
return true
return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
}
t.maxStreamID = streamID
@ -381,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
var (
// If a gRPC Response-Headers has already been received, then it means
// that the peer is speaking gRPC and we are in gRPC mode.
isGRPC = false
mdata = make(map[string][]string)
httpMethod string
// headerError is set if an error is encountered while parsing the headers
headerError bool
// if false, content-type was missing or invalid
isGRPC = false
contentType = ""
mdata = make(map[string][]string)
httpMethod string
// these are set if an error is encountered while parsing the headers
protocolError bool
headerError *status.Status
timeoutSet bool
timeout time.Duration
@ -398,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
case "content-type":
contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
if !validContentType {
contentType = hf.Value
break
}
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
@ -413,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
timeoutSet = true
var err error
if timeout, err = decodeTimeout(hf.Value); err != nil {
headerError = true
headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
}
// "Transports must consider requests containing the Connection header
// as malformed." - A41
@ -421,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
}
headerError = true
protocolError = true
default:
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
break
}
v, err := decodeMetadataHeader(hf.Name, hf.Value)
if err != nil {
headerError = true
headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
break
}
@ -447,23 +448,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
logger.Errorf("transport: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
httpStatus: 400,
httpStatus: http.StatusBadRequest,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
rst: !frame.StreamEnded(),
})
return false
return nil
}
if !isGRPC || headerError {
if protocolError {
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: http2.ErrCodeProtocol,
onWrite: func() {},
})
return false
return nil
}
if !isGRPC {
t.controlBuf.put(&earlyAbortStream{
httpStatus: http.StatusUnsupportedMediaType,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
rst: !frame.StreamEnded(),
})
return nil
}
if headerError != nil {
t.controlBuf.put(&earlyAbortStream{
httpStatus: http.StatusBadRequest,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: headerError,
rst: !frame.StreamEnded(),
})
return nil
}
// "If :authority is missing, Host must be renamed to :authority." - A41
@ -503,7 +524,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
if t.state != reachable {
t.mu.Unlock()
s.cancel()
return false
return nil
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
@ -514,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
onWrite: func() {},
})
s.cancel()
return false
return nil
}
if httpMethod != http.MethodPost {
t.mu.Unlock()
@ -530,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rst: !frame.StreamEnded(),
})
s.cancel()
return false
return nil
}
if t.inTapHandle != nil {
var err error
@ -550,7 +571,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
status: stat,
rst: !frame.StreamEnded(),
})
return false
return nil
}
}
t.activeStreams[streamID] = s
@ -597,7 +618,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
wq: s.wq,
})
handle(s)
return false
return nil
}
// HandleStreams receives incoming streams using the given handler. This is
@ -630,19 +651,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
continue
}
if err == io.EOF || err == io.ErrUnexpectedEOF {
t.Close()
t.Close(err)
return
}
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
}
t.Close()
t.Close(err)
return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
if t.operateHeaders(frame, handle, traceCtx) {
t.Close()
if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
t.Close(err)
break
}
case *http2.DataFrame:
@ -843,8 +861,8 @@ const (
func (t *http2Server) handlePing(f *http2.PingFrame) {
if f.IsAck() {
if f.Data == goAwayPing.data && t.drainChan != nil {
close(t.drainChan)
if f.Data == goAwayPing.data && t.drainEvent != nil {
t.drainEvent.Fire()
return
}
// Maybe it's a BDP ping.
@ -886,10 +904,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
if logger.V(logLevel) {
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
}
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
}
}
@ -1153,7 +1168,7 @@ func (t *http2Server) keepalive() {
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to maximum connection age.")
}
t.Close()
t.controlBuf.put(closeConnection{})
case <-t.done:
}
return
@ -1169,10 +1184,7 @@ func (t *http2Server) keepalive() {
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to idleness.")
}
t.Close()
t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
return
}
if !outstandingPing {
@ -1199,12 +1211,15 @@ func (t *http2Server) keepalive() {
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() {
func (t *http2Server) Close(err error) {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return
}
if logger.V(logLevel) {
logger.Infof("transport: closing: %v", err)
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
@ -1295,10 +1310,10 @@ func (t *http2Server) RemoteAddr() net.Addr {
func (t *http2Server) Drain() {
t.mu.Lock()
defer t.mu.Unlock()
if t.drainChan != nil {
if t.drainEvent != nil {
return
}
t.drainChan = make(chan struct{})
t.drainEvent = grpcsync.NewEvent()
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
}
@ -1319,19 +1334,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
// Stop accepting more streams now.
t.state = draining
sid := t.maxStreamID
retErr := g.closeConn
if len(t.activeStreams) == 0 {
g.closeConn = true
retErr = errors.New("second GOAWAY written and no active streams left to process")
}
t.mu.Unlock()
t.maxStreamMu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
if g.closeConn {
if retErr != nil {
// Abruptly close the connection following the GoAway (via
// loopywriter). But flush out what's inside the buffer first.
t.framer.writer.Flush()
return false, fmt.Errorf("transport: Connection closing")
return false, retErr
}
return true, nil
}
@ -1353,7 +1369,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-t.drainChan:
case <-t.drainEvent.Done():
case <-timer.C:
case <-t.done:
return
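The switch from drainChan to drainEvent relies on grpcsync.Event semantics: Fire is idempotent and Done exposes a channel that is closed exactly once, so the GOAWAY ping-ack path no longer has to guard against closing a channel twice. A minimal stand-in for that type (the real one lives in the internal grpcsync package):

package main

import (
	"fmt"
	"sync"
)

// event approximates grpcsync.Event: Fire may be called any number of times,
// and Done returns a channel that is closed after the first Fire.
type event struct {
	once sync.Once
	c    chan struct{}
}

func newEvent() *event                 { return &event{c: make(chan struct{})} }
func (e *event) Fire()                 { e.once.Do(func() { close(e.c) }) }
func (e *event) Done() <-chan struct{} { return e.c }

func main() {
	drain := newEvent()
	drain.Fire()
	drain.Fire() // safe; a bare channel would panic on a second close
	<-drain.Done()
	fmt.Println("drain signalled")
}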

View File

@ -43,6 +43,10 @@ import (
"google.golang.org/grpc/tap"
)
// ErrNoHeaders is used as a signal that a trailers only response was received,
// and is not a real error.
var ErrNoHeaders = errors.New("stream has no headers")
const logLevel = 2
type bufferPool struct {
@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) {
return s.header.Copy(), nil
}
s.waitOnHeader()
if !s.headerValid {
return nil, s.status.Err()
}
if s.noHeaders {
return nil, ErrNoHeaders
}
return s.header.Copy(), nil
}
@ -573,8 +583,8 @@ type ConnectOptions struct {
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
}
// Options provides additional hints and information for message
@ -691,7 +701,7 @@ type ServerTransport interface {
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
// handlers will be terminated asynchronously.
Close()
Close(err error)
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr

View File

@ -41,10 +41,11 @@ type MD map[string][]string
// New creates an MD from a given key-value map.
//
// Only the following ASCII characters are allowed in keys:
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
//
// Uppercase letters are automatically converted to lowercase.
//
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
@ -62,10 +63,11 @@ func New(m map[string]string) MD {
// Pairs panics if len(kv) is odd.
//
// Only the following ASCII characters are allowed in keys:
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
//
// Uppercase letters are automatically converted to lowercase.
//
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
@ -196,7 +198,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Key must be lower-case.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
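The reflowed comments above restate the key rules for `New` and `Pairs` (digits, letters, `-_.`; uppercase normalized to lowercase). A short illustration using the standard metadata package:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Uppercase letters in keys are normalized to lowercase.
	md := metadata.New(map[string]string{"User-ID": "42"})
	fmt.Println(md.Get("user-id")) // [42]

	// Pairs takes an even number of arguments; repeated keys accumulate.
	md2 := metadata.Pairs("route", "a", "route", "b")
	fmt.Println(md2.Get("route")) // [a b]
}
```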

View File

@ -26,6 +26,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal/channelz"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
)
@ -57,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
pw.mu.Unlock()
}
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
// doneChannelzWrapper performs the following:
// - increments the calls started channelz counter
// - wraps the done function in the passed in result to increment the calls
// failed or calls succeeded channelz counter before invoking the actual
// done function.
func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
acw.mu.Lock()
ac := acw.ac
acw.mu.Unlock()
ac.incrCallsStarted()
return func(b balancer.DoneInfo) {
done := result.Done
result.Done = func(b balancer.DoneInfo) {
if b.Err != nil && b.Err != io.EOF {
ac.incrCallsFailed()
} else {
@ -81,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
// - the current picker returns other errors and failfast is false.
// - the subConn returned by the current picker is not READY
// When one of these situations happens, pick blocks until the picker gets updated.
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
var ch chan struct{}
var lastPickErr error
@ -89,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
pw.mu.Lock()
if pw.done {
pw.mu.Unlock()
return nil, nil, ErrClientConnClosing
return nil, balancer.PickResult{}, ErrClientConnClosing
}
if pw.picker == nil {
@ -110,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
switch ctx.Err() {
case context.DeadlineExceeded:
return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
case context.Canceled:
return nil, nil, status.Error(codes.Canceled, errStr)
return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
}
case <-ch:
}
@ -124,14 +131,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
pw.mu.Unlock()
pickResult, err := p.Pick(info)
if err != nil {
if err == balancer.ErrNoSubConnAvailable {
continue
}
if _, ok := status.FromError(err); ok {
if st, ok := status.FromError(err); ok {
// Status error: end the RPC unconditionally with this status.
return nil, nil, dropError{error: err}
// First restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
}
return nil, balancer.PickResult{}, dropError{error: err}
}
// For all other errors, wait for ready RPCs should block and other
// RPCs should fail with unavailable.
@ -139,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
lastPickErr = err
continue
}
return nil, nil, status.Error(codes.Unavailable, err.Error())
return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
}
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
@ -149,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
if t := acw.getAddrConn().getReadyTransport(); t != nil {
if channelz.IsOn() {
return t, doneChannelzWrapper(acw, pickResult.Done), nil
doneChannelzWrapper(acw, &pickResult)
return t, pickResult, nil
}
return t, pickResult.Done, nil
return t, pickResult, nil
}
if pickResult.Done != nil {
// Calling done with nil error, no bytes sent and no bytes received.
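`pick` now returns the whole `balancer.PickResult` instead of just the `Done` callback, which is why `doneChannelzWrapper` mutates the result in place. For context, a minimal sketch of a custom picker producing such a result; the single fixed SubConn and the method name are illustrative only.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

// singlePicker always returns the same SubConn; gRPC invokes Done when the
// RPC attempt on that SubConn finishes.
type singlePicker struct {
	sc balancer.SubConn
}

func (p *singlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{
		SubConn: p.sc,
		Done: func(di balancer.DoneInfo) {
			// Per-RPC bookkeeping (metrics, load reports) would go here.
			fmt.Println("rpc finished, err:", di.Err)
		},
	}, nil
}

func main() {
	var p balancer.Picker = &singlePicker{} // SubConn left nil for illustration
	res, err := p.Pick(balancer.PickInfo{FullMethodName: "/pkg.Service/Method"})
	fmt.Println(err)
	res.Done(balancer.DoneInfo{}) // prints "rpc finished, err: <nil>"
}
```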

View File

@ -51,7 +51,7 @@ type pickfirstBalancer struct {
func (b *pickfirstBalancer) ResolverError(err error) {
if logger.V(2) {
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err)
}
if b.subConn == nil {
b.state = connectivity.TransientFailure
@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
b.subConn = subConn
b.state = connectivity.Idle
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.Idle,
Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
b.subConn.Connect()
return nil

View File

@ -25,7 +25,7 @@ import (
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -57,7 +57,8 @@ LEGACY_SOURCES=(
${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
profiling/proto/service.proto
reflection/grpc_reflection_v1alpha/reflection.proto
${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
)
# Generates only the new gRPC Service symbols
@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
# grpc/testing does not have a go_package option.
mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
cp -R ${WORKDIR}/out/google.golang.org/grpc/* .

View File

@ -24,6 +24,7 @@ import (
"context"
"net"
"net/url"
"strings"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
@ -96,7 +97,7 @@ const (
// Address represents a server the client connects to.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -236,20 +237,17 @@ type ClientConn interface {
//
// Examples:
//
// - "dns://some_authority/foo.bar"
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
// - "foo.bar"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
// - "dns://some_authority/foo.bar"
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
// - "foo.bar"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
// Deprecated: use URL.Scheme instead.
Scheme string
// Deprecated: use URL.Host instead.
Authority string
// Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
// the former is empty.
Endpoint string
// URL contains the parsed dial target with an optional default scheme added
// to it if the original dial target contained no scheme or contained an
// unregistered scheme. Any query params specified in the original dial
@ -257,6 +255,24 @@ type Target struct {
URL url.URL
}
// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
// or `URL.Opaque`. The latter is used when the former is empty.
func (t Target) Endpoint() string {
endpoint := t.URL.Path
if endpoint == "" {
endpoint = t.URL.Opaque
}
// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
// value returned from url.Parse() contains a leading "/". Although this is
// in accordance with RFC 3986, we do not want to break existing resolver
// implementations which expect the endpoint without the leading "/". So, we
// end up stripping the leading "/" here. But this will result in an
// incorrect parsing for something like "unix:///path/to/socket". Since we
// own the "unix" resolver, we can workaround in the unix resolver by using
// the `URL` field.
return strings.TrimPrefix(endpoint, "/")
}
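`Endpoint()` replaces the deprecated `Endpoint` field: it reads `URL.Path`, falls back to `URL.Opaque`, and strips the leading "/". A quick illustration, constructing Targets directly for demonstration purposes:

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/resolver"
)

func main() {
	for _, raw := range []string{
		"dns://some_authority/foo.bar", // URL.Path carries "/foo.bar"
		"unix:relative/path.sock",      // opaque form: URL.Opaque carries the value
	} {
		u, err := url.Parse(raw)
		if err != nil {
			panic(err)
		}
		t := resolver.Target{URL: *u}
		fmt.Printf("%-32s -> %q\n", raw, t.Endpoint())
	}
}
```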
// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.

View File

@ -25,7 +25,6 @@ import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"strings"
"sync"
@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
w, err := gzip.NewWriterLevel(io.Discard, level)
if err != nil {
panic(err)
}
@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
z.Close()
d.pool.Put(z)
}()
return ioutil.ReadAll(z)
return io.ReadAll(z)
}
func (d *gzipDecompressor) Type() string {
@ -198,7 +197,7 @@ func Header(md *metadata.MD) CallOption {
// HeaderCallOption is a CallOption for collecting response header metadata.
// The metadata field will be populated *after* the RPC completes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -220,7 +219,7 @@ func Trailer(md *metadata.MD) CallOption {
// TrailerCallOption is a CallOption for collecting response trailer metadata.
// The metadata field will be populated *after* the RPC completes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -242,7 +241,7 @@ func Peer(p *peer.Peer) CallOption {
// PeerCallOption is a CallOption for collecting the identity of the remote
// peer. The peer field will be populated *after* the RPC completes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -282,7 +281,7 @@ func FailFast(failFast bool) CallOption {
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
// fast or not.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -297,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error {
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive.
// in bytes the client can receive. If this is not set, gRPC uses the default
// 4MB.
func MaxCallRecvMsgSize(bytes int) CallOption {
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
}
@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption {
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can receive.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -320,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send.
// in bytes the client can send. If this is not set, gRPC uses the default
// `math.MaxInt32`.
func MaxCallSendMsgSize(bytes int) CallOption {
return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
}
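The updated comments document the defaults: 4MB for receiving and `math.MaxInt32` for sending. A hedged usage sketch raising the limits for every call on a connection; the address and credentials choice are illustrative.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Raise the per-RPC limits for every call on this connection; individual
	// calls can still override these with their own CallOptions.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(16*1024*1024), // default is 4MB
			grpc.MaxCallSendMsgSize(8*1024*1024),  // default is math.MaxInt32
		),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```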
@ -328,7 +329,7 @@ func MaxCallSendMsgSize(bytes int) CallOption {
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can send.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -351,7 +352,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
// credentials to use for the call.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -369,7 +370,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -379,7 +380,7 @@ func UseCompressor(name string) CallOption {
// CompressorCallOption is a CallOption that indicates the compressor to use.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -416,7 +417,7 @@ func CallContentSubtype(contentSubtype string) CallOption {
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
// used for marshaling messages.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -444,7 +445,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -455,7 +456,7 @@ func ForceCodec(codec encoding.Codec) CallOption {
// ForceCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -480,7 +481,7 @@ func CallCustomCodec(codec Codec) CallOption {
// CustomCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -497,7 +498,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -508,7 +509,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption {
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
// memory to be used for caching this RPC for retry purposes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -548,10 +549,11 @@ type parser struct {
// format. The caller owns the returned msg memory.
//
// If there is an error, possible values are:
// * io.EOF, when no messages remain
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
// * an error from the status package
// - io.EOF, when no messages remain
// - io.ErrUnexpectedEOF
// - of type transport.ConnectionError
// - an error from the status package
//
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.
@ -710,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
if size > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
@ -745,7 +747,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
}
// Read from LimitReader with limit max+1. So if the underlying
// reader is over limit, the result will be bigger than max.
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
return d, len(d), err
}
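`decompress` reads through a `LimitReader` of max+1 bytes so that an over-limit payload is detected without buffering an unbounded amount of data. The same pattern in isolation, as a small self-contained sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readAtMost reads up to max bytes from r and reports whether r held more
// than max, mirroring the limit+1 trick used by decompress above.
func readAtMost(r io.Reader, max int64) ([]byte, bool, error) {
	data, err := io.ReadAll(io.LimitReader(r, max+1))
	if err != nil {
		return nil, false, err
	}
	if int64(len(data)) > max {
		return nil, true, nil // payload exceeds the limit
	}
	return data, false, nil
}

func main() {
	_, tooBig, _ := readAtMost(bytes.NewReader(make([]byte, 10)), 4)
	fmt.Println("exceeds limit:", tooBig) // true
}
```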
@ -758,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
return err
}
if err := c.Unmarshal(d, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
if payInfo != nil {
payInfo.uncompressedBytes = d

View File

@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption {
return &joinServerOption{opts: opts}
}
// WriteBufferSize determines how much data can be batched before doing a write on the wire.
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
// The default value for this buffer is 32KB.
// Zero will disable the write buffer such that each write will be on underlying connection.
// WriteBufferSize determines how much data can be batched before doing a write
// on the wire. The corresponding memory allocation for this buffer will be
// twice the size to keep syscalls low. The default value for this buffer is
// 32KB. Zero or negative values will disable the write buffer such that each
// write will be on underlying connection.
// Note: A Send call may not directly translate to a write.
func WriteBufferSize(s int) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption {
})
}
// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
// for one read syscall.
// The default value for this buffer is 32KB.
// Zero will disable read buffer for a connection so data framer can access the underlying
// conn directly.
// ReadBufferSize lets you set the size of read buffer, this determines how much
// data can be read at most for one read syscall. The default value for this
// buffer is 32KB. Zero or negative values will disable read buffer for a
// connection so data framer can access the underlying conn directly.
func ReadBufferSize(s int) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.readBufferSize = s
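The reworded comments clarify that zero or negative values now disable the buffers entirely. A hedged configuration sketch using the public server options; the chosen sizes are arbitrary.

```go
package main

import "google.golang.org/grpc"

func main() {
	// Larger write batches, and reads that go straight to the connection.
	srv := grpc.NewServer(
		grpc.WriteBufferSize(64*1024), // default 32KB; 0 or negative disables batching
		grpc.ReadBufferSize(0),        // 0 or negative disables the read buffer
	)
	defer srv.Stop()
}
```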
@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
}
func (s *Server) serveStreams(st transport.ServerTransport) {
defer st.Close()
defer st.Close(errors.New("finished serving streams for the server transport"))
var wg sync.WaitGroup
var roundRobinCounter uint32
@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil)
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
return
}
if !s.addConn(listenerAddressForServeHTTP, st) {
@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil {
st.Close()
st.Close(errors.New("Server.addConn called when server has already been stopped"))
return false
}
if s.drain {
@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) {
func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
// the struct ensures the variables are allocated together, rather than separately, since we
// know they should be garbage collected together. This saves 1 allocation and decreases
// time/call by about 10% on the microbenchmark.
var state struct {
i int
next UnaryHandler
}
state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
if state.i == len(interceptors)-1 {
return interceptors[state.i](ctx, req, info, handler)
}
state.i++
return interceptors[state.i-1](ctx, req, info, state.next)
}
return state.next(ctx, req)
return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
}
}
func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
if curr == len(interceptors)-1 {
return finalHandler
}
return func(ctx context.Context, req interface{}) (interface{}, error) {
return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
}
}
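`chainUnaryInterceptors` now builds the chain recursively via `getChainUnaryHandler` instead of the closure-with-state approach. From a caller's perspective nothing changes: interceptors registered with `ChainUnaryInterceptor` still run in registration order, each wrapping the next handler. A small sketch; the interceptor bodies are illustrative.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

func first(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("first: before %s", info.FullMethod)
	resp, err := handler(ctx, req) // next interceptor, or the service method
	log.Printf("first: after %s", info.FullMethod)
	return resp, err
}

func second(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("second: %s", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	// first runs, then second, then the registered method handler.
	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(first, second))
	defer srv.Stop()
}
```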
@ -1303,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}
@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) {
func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
// the struct ensures the variables are allocated together, rather than separately, since we
// know they should be garbage collected together. This saves 1 allocation and decreases
// time/call by about 10% on the microbenchmark.
var state struct {
i int
next StreamHandler
}
state.next = func(srv interface{}, ss ServerStream) error {
if state.i == len(interceptors)-1 {
return interceptors[state.i](srv, ss, info, handler)
}
state.i++
return interceptors[state.i-1](srv, ss, info, state.next)
}
return state.next(srv, ss)
return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
}
}
func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
if curr == len(interceptors)-1 {
return finalHandler
}
return func(srv interface{}, stream ServerStream) error {
return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
}
}
@ -1819,7 +1810,7 @@ func (s *Server) Stop() {
}
for _, cs := range conns {
for st := range cs {
st.Close()
st.Close(errors.New("Server.Stop called"))
}
}
if s.opts.numServerWorkers > 0 {

View File

@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{
@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
}
d, err := parseDuration(m.Timeout)
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
Timeout: d,
}
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {
@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
for i, n := range *m.Name {
path, err := n.generatePath()
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
if _, ok := paths[path]; ok {
err = errDuplicatedName
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
paths[path] = struct{}{}

View File

@ -19,7 +19,7 @@
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status {
// FromError returns a Status representation of err.
//
// - If err was produced by this package or implements the method `GRPCStatus()
// *Status`, the appropriate Status is returned.
// - If err was produced by this package or implements the method `GRPCStatus()
// *Status`, the appropriate Status is returned.
//
// - If err is nil, a Status is returned with codes.OK and no message.
// - If err is nil, a Status is returned with codes.OK and no message.
//
// - Otherwise, err is an error not compatible with this package. In this
// case, a Status is returned with codes.Unknown and err's Error() message,
// and ok is false.
// - Otherwise, err is an error not compatible with this package. In this
// case, a Status is returned with codes.Unknown and err's Error() message,
// and ok is false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return nil, true
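FromError's contract, restated in the list above, is easiest to see with a quick example; the codes and messages are chosen for illustration.

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A status-package error round-trips with ok == true.
	st, ok := status.FromError(status.Error(codes.NotFound, "no such user"))
	fmt.Println(st.Code(), ok) // NotFound true

	// nil maps to codes.OK.
	st, ok = status.FromError(nil)
	fmt.Println(st.Code(), ok) // OK true

	// Any other error becomes codes.Unknown with ok == false.
	st, ok = status.FromError(errors.New("plain error"))
	fmt.Println(st.Code(), ok) // Unknown false
}
```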

View File

@ -39,6 +39,7 @@ import (
imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/serviceconfig"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
if err != nil {
if st, ok := status.FromError(err); ok {
// Restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
}
return nil, err
}
return nil, toRPCErr(err)
}
@ -408,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx = trace.NewContext(ctx, trInfo.tr)
}
if cs.cc.parsedTarget.Scheme == "xds" {
if cs.cc.parsedTarget.URL.Scheme == "xds" {
// Add extra metadata (metadata that will be added by transport) to context
// so the balancer can see them.
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
@ -430,7 +438,7 @@ func (a *csAttempt) getTransport() error {
cs := a.cs
var err error
a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error
@ -447,6 +455,25 @@ func (a *csAttempt) getTransport() error {
func (a *csAttempt) newStream() error {
cs := a.cs
cs.callHdr.PreviousAttempts = cs.numRetries
// Merge metadata stored in PickResult, if any, with existing call metadata.
// It is safe to overwrite the csAttempt's context here, since all state
// maintained in it are local to the attempt. When the attempt has to be
// retried, a new instance of csAttempt will be created.
if a.pickResult.Metatada != nil {
// We currently do not have a function in the metadata package which
// merges given metadata with existing metadata in a context. Existing
// function `AppendToOutgoingContext()` takes a variadic argument of key
// value pairs.
//
// TODO: Make it possible to retrieve key value pairs from metadata.MD
// in a form passable to AppendToOutgoingContext(), or create a version
// of AppendToOutgoingContext() that accepts a metadata.MD.
md, _ := metadata.FromOutgoingContext(a.ctx)
md = metadata.Join(md, a.pickResult.Metatada)
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
}
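The block above merges metadata carried in the pick result into the attempt's outgoing context. The merge itself only needs public metadata helpers; here is the same pattern in isolation, with illustrative keys and values.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// mergeOutgoing joins extra metadata with whatever is already attached to
// ctx for outgoing RPCs, mirroring the merge done for pick results above.
func mergeOutgoing(ctx context.Context, extra metadata.MD) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx) // md is nil if nothing is attached
	return metadata.NewOutgoingContext(ctx, metadata.Join(md, extra))
}

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "route", "a")
	ctx = mergeOutgoing(ctx, metadata.Pairs("trace-id", "abc123"))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // both "route" and "trace-id" are present
}
```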
s, err := a.t.NewStream(a.ctx, cs.callHdr)
if err != nil {
nse, ok := err.(*transport.NewStreamError)
@ -521,12 +548,12 @@ type clientStream struct {
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
ctx context.Context
cs *clientStream
t transport.ClientTransport
s *transport.Stream
p *parser
done func(balancer.DoneInfo)
ctx context.Context
cs *clientStream
t transport.ClientTransport
s *transport.Stream
p *parser
pickResult balancer.PickResult
finished bool
dc Decompressor
@ -744,17 +771,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
func (cs *clientStream) Header() (metadata.MD, error) {
var m metadata.MD
noHeader := false
err := cs.withRetry(func(a *csAttempt) error {
var err error
m, err = a.s.Header()
if err == transport.ErrNoHeaders {
noHeader = true
return nil
}
return toRPCErr(err)
}, cs.commitAttemptLocked)
if err != nil {
cs.finish(err)
return nil, err
}
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
// Only log if binary log is on and header has not been logged.
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
// Only log if binary log is on and header has not been logged, and
// there are actually headers to log.
logEntry := &binarylog.ServerHeader{
OnClientSide: true,
Header: m,
@ -1087,12 +1122,12 @@ func (a *csAttempt) finish(err error) {
tr = a.s.Trailer()
}
if a.done != nil {
if a.pickResult.Done != nil {
br := false
if a.s != nil {
br = a.s.BytesReceived()
}
a.done(balancer.DoneInfo{
a.pickResult.Done(balancer.DoneInfo{
Err: err,
Trailer: tr,
BytesSent: a.s != nil,
@ -1448,6 +1483,9 @@ type ServerStream interface {
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines.
//
// It is not safe to modify the message after calling SendMsg. Tracing
// libraries and stats handlers may use the message lazily.
SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the client has performed a CloseSend. On

View File

@ -19,7 +19,7 @@
// Package tap defines the function handles which are executed on the transport
// layer of gRPC-Go and related information.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.50.1"
const Version = "1.53.0"

vendor/google.golang.org/grpc/vet.sh generated vendored
View File

@ -66,8 +66,21 @@ elif [[ "$#" -ne 0 ]]; then
die "Unknown argument(s): $*"
fi
# - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then
PATH="/home/travis/bin:${PATH}" make proto && \
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
fi
if [[ -n "${VET_ONLY_PROTO}" ]]; then
exit 0
fi
# - Ensure all source files contain a copyright message.
not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
# (Done in two parts because Darwin "git grep" has broken support for compound
# exclusion matches.)
(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output
# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
not grep 'func Test[^(]' *_test.go
@ -81,7 +94,7 @@ not git grep -l 'x/net/context' -- "*.go"
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
@ -91,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.
misspell -error .
# - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then
PATH="/home/travis/bin:${PATH}" make proto && \
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
fi
# - gofmt, goimports, golint (with exceptions for generated code), go vet,
# go mod tidy.
# Perform these checks on each module inside gRPC.
@ -109,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
go mod tidy
go mod tidy -compat=1.17
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
popd
@ -119,8 +125,9 @@ done
#
# TODO(dfawley): don't use deprecated functions in examples or first-party
# plugins.
# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
SC_OUT="$(mktemp)"
staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
# Error if anything other than deprecation warnings are printed.
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
# Only ignore the following deprecated types/fields/functions.

vendor/modules.txt vendored
View File

@ -303,10 +303,10 @@ golang.org/x/text/width
# golang.org/x/time v0.3.0
## explicit
golang.org/x/time/rate
# google.golang.org/genproto v0.0.0-20220706185917-7780775163c4
## explicit; go 1.15
# google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.50.1
# google.golang.org/grpc v1.53.0
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes