mirror of https://github.com/docker/cli.git

vendor: github.com/containerd/containerd v1.6.10

full diff: https://github.com/containerd/containerd/compare/v1.6.8...v1.6.10

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
parent 88b33a667f
commit 86038fdb68
@@ -7,7 +7,7 @@ module github.com/docker/cli
 go 1.18
 
 require (
-	github.com/containerd/containerd v1.6.8
+	github.com/containerd/containerd v1.6.10
 	github.com/creack/pty v1.1.11
 	github.com/docker/distribution v2.8.1+incompatible
 	github.com/docker/docker v20.10.20+incompatible // v22.06.x - see "replace" for the actual version
@@ -71,9 +71,9 @@ require (
 	golang.org/x/crypto v0.1.0 // indirect
 	golang.org/x/net v0.1.0 // indirect
 	golang.org/x/time v0.1.0 // indirect
-	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
+	google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
-	google.golang.org/grpc v1.45.0 // indirect
+	google.golang.org/grpc v1.47.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
 )
 
 replace (
vendor.sum (31 lines changed)
@@ -38,8 +38,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
+github.com/Microsoft/hcsshim v0.9.5 h1:AbV+VPfTrIVffukazHcpxmz/sRiE6YaMDzHWR9BXZHo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d h1:hi6J4K6DKrR4/ljxn6SF6nURyu785wKMuQcjt7H3VCQ=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -64,7 +63,6 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3k
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -79,17 +77,16 @@ github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5 h1:PqZ3bA4yzwywiv
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs=
+github.com/containerd/containerd v1.6.10 h1:8aiav7I2ZyQLbTlNMcBXyAU1FtFvp6VuyuW13qSd6Hk=
-github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
+github.com/containerd/containerd v1.6.10/go.mod h1:CVqfxdJ95PDgORwA219AwwLrREZgrTFybXu2HfMKRG0=
-github.com/containerd/continuity v0.2.3-0.20220330195504-d132b287edc8 h1:yGFEcFNMhze29DxAAB33v/1OMRYF/cM9iwwgV2P0ZrE=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -127,8 +124,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
@@ -198,6 +194,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -370,7 +367,6 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 h1:JmfC365KywYwHB946TTiQWEb8kqPY+pybPLoGE9GgVk=
 github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
 github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
@@ -688,8 +684,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
 google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -705,9 +701,9 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
+google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -720,8 +716,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
@@ -42,7 +42,7 @@
 //
 // More details of the specifier syntax and platform spec follow.
 //
-// Declaring Platform Support
+// # Declaring Platform Support
 //
 // Components that have strict platform requirements should use the OCI
 // platform specification to declare their support. Typically, this will be
@@ -60,7+60,7 @@
 // specification when in doubt). ARM should set variant under certain
 // discussions, which are outlined below.
 //
-// Platform Specifiers
+// # Platform Specifiers
 //
 // While the OCI platform specifications provide a tool for components to
 // specify structured information, user input typically doesn't need the full
@@ -77,7 +77,7 @@
 // where the architecture may be known but a runtime may support images from
 // different operating systems.
 //
-// Normalization
+// # Normalization
 //
 // Because not all users are familiar with the way the Go runtime represents
 // platforms, several normalizations have been provided to make this package
@@ -95,7 +95,7 @@
 //
 // We also normalize the operating system `macos` to `darwin`.
 //
-// ARM Support
+// # ARM Support
 //
 // To qualify ARM architecture, the Variant field is used to qualify the arm
 // version. The most common arm version, v7, is represented without the variant
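The four hunks above only switch the section headings of this package documentation to the "# Heading" form that go/doc recognizes since Go 1.19; the specifier parsing and normalization behaviour they describe is unchanged. For reference, a minimal sketch of how that documented API is typically consumed, assuming the containerd platforms package and its Parse, Normalize, and Format helpers (none of which appear in this diff):

// Sketch only, not part of this commit. Import path and helper names are
// assumptions based on the containerd platforms package.
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse accepts the loose specifier syntax described above;
	// the operating system "macos" is normalized to "darwin".
	p, err := platforms.Parse("macos/arm64")
	if err != nil {
		panic(err)
	}
	p = platforms.Normalize(p)
	fmt.Println(platforms.Format(p)) // e.g. "darwin/arm64"
}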
@@ -0,0 +1,524 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package jsonpb

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protojson"
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

const wrapJSONUnmarshalV2 = false

// UnmarshalNext unmarshals the next JSON object from d into m.
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
	return new(Unmarshaler).UnmarshalNext(d, m)
}

// Unmarshal unmarshals a JSON object from r into m.
func Unmarshal(r io.Reader, m proto.Message) error {
	return new(Unmarshaler).Unmarshal(r, m)
}

// UnmarshalString unmarshals a JSON object from s into m.
func UnmarshalString(s string, m proto.Message) error {
	return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
}

// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
type Unmarshaler struct {
	// AllowUnknownFields specifies whether to allow messages to contain
	// unknown JSON fields, as opposed to failing to unmarshal.
	AllowUnknownFields bool

	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
	// If unset, the global registry is used by default.
	AnyResolver AnyResolver
}

// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
// they are unmarshaled from JSON. Messages that implement this should also
// implement JSONPBMarshaler so that the custom format can be produced.
//
// The JSON unmarshaling must follow the JSON to proto specification:
//	https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBUnmarshaler interface {
	UnmarshalJSONPB(*Unmarshaler, []byte) error
}

// Unmarshal unmarshals a JSON object from r into m.
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
	return u.UnmarshalNext(json.NewDecoder(r), m)
}

// UnmarshalNext unmarshals the next JSON object from d into m.
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
	if m == nil {
		return errors.New("invalid nil message")
	}

	// Parse the next JSON object from the stream.
	raw := json.RawMessage{}
	if err := d.Decode(&raw); err != nil {
		return err
	}

	// Check for custom unmarshalers first since they may not properly
	// implement protobuf reflection that the logic below relies on.
	if jsu, ok := m.(JSONPBUnmarshaler); ok {
		return jsu.UnmarshalJSONPB(u, raw)
	}

	mr := proto.MessageReflect(m)

	// NOTE: For historical reasons, a top-level null is treated as a noop.
	// This is incorrect, but kept for compatibility.
	if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
		return nil
	}

	if wrapJSONUnmarshalV2 {
		// NOTE: If input message is non-empty, we need to preserve merge semantics
		// of the old jsonpb implementation. These semantics are not supported by
		// the protobuf JSON specification.
		isEmpty := true
		mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
			isEmpty = false // at least one iteration implies non-empty
			return false
		})
		if !isEmpty {
			// Perform unmarshaling into a newly allocated, empty message.
			mr = mr.New()

			// Use a defer to copy all unmarshaled fields into the original message.
			dst := proto.MessageReflect(m)
			defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
				dst.Set(fd, v)
				return true
			})
		}

		// Unmarshal using the v2 JSON unmarshaler.
		opts := protojson.UnmarshalOptions{
			DiscardUnknown: u.AllowUnknownFields,
		}
		if u.AnyResolver != nil {
			opts.Resolver = anyResolver{u.AnyResolver}
		}
		return opts.Unmarshal(raw, mr.Interface())
	} else {
		if err := u.unmarshalMessage(mr, raw); err != nil {
			return err
		}
		return protoV2.CheckInitialized(mr.Interface())
	}
}

func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
	md := m.Descriptor()
	fds := md.Fields()

	if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
		return jsu.UnmarshalJSONPB(u, in)
	}

	if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
		return nil
	}

	switch wellKnownType(md.FullName()) {
	case "Any":
		var jsonObject map[string]json.RawMessage
		if err := json.Unmarshal(in, &jsonObject); err != nil {
			return err
		}

		rawTypeURL, ok := jsonObject["@type"]
		if !ok {
			return errors.New("Any JSON doesn't have '@type'")
		}
		typeURL, err := unquoteString(string(rawTypeURL))
		if err != nil {
			return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
		}
		m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))

		var m2 protoreflect.Message
		if u.AnyResolver != nil {
			mi, err := u.AnyResolver.Resolve(typeURL)
			if err != nil {
				return err
			}
			m2 = proto.MessageReflect(mi)
		} else {
			mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
			if err != nil {
				if err == protoregistry.NotFound {
					return fmt.Errorf("could not resolve Any message type: %v", typeURL)
				}
				return err
			}
			m2 = mt.New()
		}

		if wellKnownType(m2.Descriptor().FullName()) != "" {
			rawValue, ok := jsonObject["value"]
			if !ok {
				return errors.New("Any JSON doesn't have 'value'")
			}
			if err := u.unmarshalMessage(m2, rawValue); err != nil {
				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
			}
		} else {
			delete(jsonObject, "@type")
			rawJSON, err := json.Marshal(jsonObject)
			if err != nil {
				return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
			}
			if err = u.unmarshalMessage(m2, rawJSON); err != nil {
				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
			}
		}

		rawWire, err := protoV2.Marshal(m2.Interface())
		if err != nil {
			return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
		}
		m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
		return nil
	case "BoolValue", "BytesValue", "StringValue",
		"Int32Value", "UInt32Value", "FloatValue",
		"Int64Value", "UInt64Value", "DoubleValue":
		fd := fds.ByNumber(1)
		v, err := u.unmarshalValue(m.NewField(fd), in, fd)
		if err != nil {
			return err
		}
		m.Set(fd, v)
		return nil
	case "Duration":
		v, err := unquoteString(string(in))
		if err != nil {
			return err
		}
		d, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("bad Duration: %v", err)
		}

		sec := d.Nanoseconds() / 1e9
		nsec := d.Nanoseconds() % 1e9
		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
		return nil
	case "Timestamp":
		v, err := unquoteString(string(in))
		if err != nil {
			return err
		}
		t, err := time.Parse(time.RFC3339Nano, v)
		if err != nil {
			return fmt.Errorf("bad Timestamp: %v", err)
		}

		sec := t.Unix()
		nsec := t.Nanosecond()
		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
		return nil
	case "Value":
		switch {
		case string(in) == "null":
			m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
		case string(in) == "true":
			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
		case string(in) == "false":
			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
		case hasPrefixAndSuffix('"', in, '"'):
			s, err := unquoteString(string(in))
			if err != nil {
				return fmt.Errorf("unrecognized type for Value %q", in)
			}
			m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
		case hasPrefixAndSuffix('[', in, ']'):
			v := m.Mutable(fds.ByNumber(6))
			return u.unmarshalMessage(v.Message(), in)
		case hasPrefixAndSuffix('{', in, '}'):
			v := m.Mutable(fds.ByNumber(5))
			return u.unmarshalMessage(v.Message(), in)
		default:
			f, err := strconv.ParseFloat(string(in), 0)
			if err != nil {
				return fmt.Errorf("unrecognized type for Value %q", in)
			}
			m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
		}
		return nil
	case "ListValue":
		var jsonArray []json.RawMessage
		if err := json.Unmarshal(in, &jsonArray); err != nil {
			return fmt.Errorf("bad ListValue: %v", err)
		}

		lv := m.Mutable(fds.ByNumber(1)).List()
		for _, raw := range jsonArray {
			ve := lv.NewElement()
			if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
				return err
			}
			lv.Append(ve)
		}
		return nil
	case "Struct":
		var jsonObject map[string]json.RawMessage
		if err := json.Unmarshal(in, &jsonObject); err != nil {
			return fmt.Errorf("bad StructValue: %v", err)
		}

		mv := m.Mutable(fds.ByNumber(1)).Map()
		for key, raw := range jsonObject {
			kv := protoreflect.ValueOf(key).MapKey()
			vv := mv.NewValue()
			if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
				return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
			}
			mv.Set(kv, vv)
		}
		return nil
	}

	var jsonObject map[string]json.RawMessage
	if err := json.Unmarshal(in, &jsonObject); err != nil {
		return err
	}

	// Handle known fields.
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if fd.IsWeak() && fd.Message().IsPlaceholder() {
			continue // weak reference is not linked in
		}

		// Search for any raw JSON value associated with this field.
		var raw json.RawMessage
		name := string(fd.Name())
		if fd.Kind() == protoreflect.GroupKind {
			name = string(fd.Message().Name())
		}
		if v, ok := jsonObject[name]; ok {
			delete(jsonObject, name)
			raw = v
		}
		name = string(fd.JSONName())
		if v, ok := jsonObject[name]; ok {
			delete(jsonObject, name)
			raw = v
		}

		field := m.NewField(fd)
		// Unmarshal the field value.
		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
			continue
		}
		v, err := u.unmarshalValue(field, raw, fd)
		if err != nil {
			return err
		}
		m.Set(fd, v)
	}

	// Handle extension fields.
	for name, raw := range jsonObject {
		if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
			continue
		}

		// Resolve the extension field by name.
		xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
		xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
		if xt == nil && isMessageSet(md) {
			xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
		}
		if xt == nil {
			continue
		}
		delete(jsonObject, name)
		fd := xt.TypeDescriptor()
		if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
			return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
		}

		field := m.NewField(fd)
		// Unmarshal the field value.
		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
			continue
		}
		v, err := u.unmarshalValue(field, raw, fd)
		if err != nil {
			return err
		}
		m.Set(fd, v)
	}

	if !u.AllowUnknownFields && len(jsonObject) > 0 {
		for name := range jsonObject {
			return fmt.Errorf("unknown field %q in %v", name, md.FullName())
		}
	}
	return nil
}

func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
	if md := fd.Message(); md != nil {
		return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
	}
	return false
}

func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
	if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
		_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
		return ok
	}
	return false
}

func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	switch {
	case fd.IsList():
		var jsonArray []json.RawMessage
		if err := json.Unmarshal(in, &jsonArray); err != nil {
			return v, err
		}
		lv := v.List()
		for _, raw := range jsonArray {
			ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
			if err != nil {
				return v, err
			}
			lv.Append(ve)
		}
		return v, nil
	case fd.IsMap():
		var jsonObject map[string]json.RawMessage
		if err := json.Unmarshal(in, &jsonObject); err != nil {
			return v, err
		}
		kfd := fd.MapKey()
		vfd := fd.MapValue()
		mv := v.Map()
		for key, raw := range jsonObject {
			var kv protoreflect.MapKey
			if kfd.Kind() == protoreflect.StringKind {
				kv = protoreflect.ValueOf(key).MapKey()
			} else {
				v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
				if err != nil {
					return v, err
				}
				kv = v.MapKey()
			}

			vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
			if err != nil {
				return v, err
			}
			mv.Set(kv, vv)
		}
		return v, nil
	default:
		return u.unmarshalSingularValue(v, in, fd)
	}
}

var nonFinite = map[string]float64{
	`"NaN"`:       math.NaN(),
	`"Infinity"`:  math.Inf(+1),
	`"-Infinity"`: math.Inf(-1),
}

func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	switch fd.Kind() {
	case protoreflect.BoolKind:
		return unmarshalValue(in, new(bool))
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		return unmarshalValue(trimQuote(in), new(int32))
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		return unmarshalValue(trimQuote(in), new(int64))
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		return unmarshalValue(trimQuote(in), new(uint32))
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		return unmarshalValue(trimQuote(in), new(uint64))
	case protoreflect.FloatKind:
		if f, ok := nonFinite[string(in)]; ok {
			return protoreflect.ValueOfFloat32(float32(f)), nil
		}
		return unmarshalValue(trimQuote(in), new(float32))
	case protoreflect.DoubleKind:
		if f, ok := nonFinite[string(in)]; ok {
			return protoreflect.ValueOfFloat64(float64(f)), nil
		}
		return unmarshalValue(trimQuote(in), new(float64))
	case protoreflect.StringKind:
		return unmarshalValue(in, new(string))
	case protoreflect.BytesKind:
		return unmarshalValue(in, new([]byte))
	case protoreflect.EnumKind:
		if hasPrefixAndSuffix('"', in, '"') {
			vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
			if vd == nil {
				return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
			}
			return protoreflect.ValueOfEnum(vd.Number()), nil
		}
		return unmarshalValue(in, new(protoreflect.EnumNumber))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		err := u.unmarshalMessage(v.Message(), in)
		return v, err
	default:
		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
	}
}

func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
	err := json.Unmarshal(in, v)
	return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
}

func unquoteString(in string) (out string, err error) {
	err = json.Unmarshal([]byte(in), &out)
	return out, err
}

func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
	if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
		return true
	}
	return false
}

// trimQuote is like unquoteString but simply strips surrounding quotes.
// This is incorrect, but is behavior done by the legacy implementation.
func trimQuote(in []byte) []byte {
	if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
		in = in[1 : len(in)-1]
	}
	return in
}
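The file added above is the legacy jsonpb decoder shim: it either delegates to protojson (behind the wrapJSONUnmarshalV2 switch) or runs the hand-written unmarshalMessage path, and is presumably pulled into the vendor tree by the dependency bumps in vendor.mod. A small usage sketch of the Unmarshaler shown above; the structpb message type and the sample JSON are illustrative assumptions, not part of this commit:

// Usage sketch for the Unmarshaler API above; message type and input
// are illustrative, not from this commit.
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	var s structpb.Struct
	u := jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(`{"image": "alpine", "count": 3}`), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Fields["image"].GetStringValue()) // "alpine"
}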
@@ -0,0 +1,559 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package jsonpb

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protojson"
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

const wrapJSONMarshalV2 = false

// Marshaler is a configurable object for marshaling protocol buffer messages
// to the specified JSON representation.
type Marshaler struct {
	// OrigName specifies whether to use the original protobuf name for fields.
	OrigName bool

	// EnumsAsInts specifies whether to render enum values as integers,
	// as opposed to string values.
	EnumsAsInts bool

	// EmitDefaults specifies whether to render fields with zero values.
	EmitDefaults bool

	// Indent controls whether the output is compact or not.
	// If empty, the output is compact JSON. Otherwise, every JSON object
	// entry and JSON array value will be on its own line.
	// Each line will be preceded by repeated copies of Indent, where the
	// number of copies is the current indentation depth.
	Indent string

	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
	// If unset, the global registry is used by default.
	AnyResolver AnyResolver
}

// JSONPBMarshaler is implemented by protobuf messages that customize the
// way they are marshaled to JSON. Messages that implement this should also
// implement JSONPBUnmarshaler so that the custom format can be parsed.
//
// The JSON marshaling must follow the proto to JSON specification:
//	https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBMarshaler interface {
	MarshalJSONPB(*Marshaler) ([]byte, error)
}

// Marshal serializes a protobuf message as JSON into w.
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
	b, err := jm.marshal(m)
	if len(b) > 0 {
		if _, err := w.Write(b); err != nil {
			return err
		}
	}
	return err
}

// MarshalToString serializes a protobuf message as JSON in string form.
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
	b, err := jm.marshal(m)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
	v := reflect.ValueOf(m)
	if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
		return nil, errors.New("Marshal called with nil")
	}

	// Check for custom marshalers first since they may not properly
	// implement protobuf reflection that the logic below relies on.
	if jsm, ok := m.(JSONPBMarshaler); ok {
		return jsm.MarshalJSONPB(jm)
	}

	if wrapJSONMarshalV2 {
		opts := protojson.MarshalOptions{
			UseProtoNames:   jm.OrigName,
			UseEnumNumbers:  jm.EnumsAsInts,
			EmitUnpopulated: jm.EmitDefaults,
			Indent:          jm.Indent,
		}
		if jm.AnyResolver != nil {
			opts.Resolver = anyResolver{jm.AnyResolver}
		}
		return opts.Marshal(proto.MessageReflect(m).Interface())
	} else {
		// Check for unpopulated required fields first.
		m2 := proto.MessageReflect(m)
		if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
			return nil, err
		}

		w := jsonWriter{Marshaler: jm}
		err := w.marshalMessage(m2, "", "")
		return w.buf, err
	}
}

type jsonWriter struct {
	*Marshaler
	buf []byte
}

func (w *jsonWriter) write(s string) {
	w.buf = append(w.buf, s...)
}

func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
	if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
		b, err := jsm.MarshalJSONPB(w.Marshaler)
		if err != nil {
			return err
		}
		if typeURL != "" {
			// we are marshaling this object to an Any type
			var js map[string]*json.RawMessage
			if err = json.Unmarshal(b, &js); err != nil {
				return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
			}
			turl, err := json.Marshal(typeURL)
			if err != nil {
				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
			}
			js["@type"] = (*json.RawMessage)(&turl)
			if b, err = json.Marshal(js); err != nil {
				return err
			}
		}
		w.write(string(b))
		return nil
	}

	md := m.Descriptor()
	fds := md.Fields()

	// Handle well-known types.
	const secondInNanos = int64(time.Second / time.Nanosecond)
	switch wellKnownType(md.FullName()) {
	case "Any":
		return w.marshalAny(m, indent)
	case "BoolValue", "BytesValue", "StringValue",
		"Int32Value", "UInt32Value", "FloatValue",
		"Int64Value", "UInt64Value", "DoubleValue":
		fd := fds.ByNumber(1)
		return w.marshalValue(fd, m.Get(fd), indent)
	case "Duration":
		const maxSecondsInDuration = 315576000000
		// "Generated output always contains 0, 3, 6, or 9 fractional digits,
		//  depending on required precision."
		s := m.Get(fds.ByNumber(1)).Int()
		ns := m.Get(fds.ByNumber(2)).Int()
		if s < -maxSecondsInDuration || s > maxSecondsInDuration {
			return fmt.Errorf("seconds out of range %v", s)
		}
		if ns <= -secondInNanos || ns >= secondInNanos {
			return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
		}
		if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
			return errors.New("signs of seconds and nanos do not match")
		}
		var sign string
		if s < 0 || ns < 0 {
			sign, s, ns = "-", -1*s, -1*ns
		}
		x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, ".000")
		w.write(fmt.Sprintf(`"%vs"`, x))
		return nil
	case "Timestamp":
		// "RFC 3339, where generated output will always be Z-normalized
		//  and uses 0, 3, 6 or 9 fractional digits."
		s := m.Get(fds.ByNumber(1)).Int()
		ns := m.Get(fds.ByNumber(2)).Int()
		if ns < 0 || ns >= secondInNanos {
			return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
		}
		t := time.Unix(s, ns).UTC()
		// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
		x := t.Format("2006-01-02T15:04:05.000000000")
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, "000")
		x = strings.TrimSuffix(x, ".000")
		w.write(fmt.Sprintf(`"%vZ"`, x))
		return nil
	case "Value":
		// JSON value; which is a null, number, string, bool, object, or array.
		od := md.Oneofs().Get(0)
		fd := m.WhichOneof(od)
		if fd == nil {
			return errors.New("nil Value")
		}
		return w.marshalValue(fd, m.Get(fd), indent)
	case "Struct", "ListValue":
		// JSON object or array.
		fd := fds.ByNumber(1)
		return w.marshalValue(fd, m.Get(fd), indent)
	}

	w.write("{")
	if w.Indent != "" {
		w.write("\n")
	}

	firstField := true
	if typeURL != "" {
		if err := w.marshalTypeURL(indent, typeURL); err != nil {
			return err
		}
		firstField = false
	}

	for i := 0; i < fds.Len(); {
		fd := fds.Get(i)
		if od := fd.ContainingOneof(); od != nil {
			fd = m.WhichOneof(od)
			i += od.Fields().Len()
			if fd == nil {
				continue
			}
		} else {
			i++
		}

		v := m.Get(fd)

		if !m.Has(fd) {
			if !w.EmitDefaults || fd.ContainingOneof() != nil {
				continue
			}
			if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
				v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
			}
		}

		if !firstField {
			w.writeComma()
		}
		if err := w.marshalField(fd, v, indent); err != nil {
			return err
		}
		firstField = false
	}

	// Handle proto2 extensions.
	if md.ExtensionRanges().Len() > 0 {
		// Collect a sorted list of all extension descriptor and values.
		type ext struct {
			desc protoreflect.FieldDescriptor
			val  protoreflect.Value
		}
		var exts []ext
		m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
			if fd.IsExtension() {
				exts = append(exts, ext{fd, v})
			}
			return true
		})
		sort.Slice(exts, func(i, j int) bool {
			return exts[i].desc.Number() < exts[j].desc.Number()
		})

		for _, ext := range exts {
			if !firstField {
				w.writeComma()
			}
			if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
				return err
			}
			firstField = false
		}
	}

	if w.Indent != "" {
		w.write("\n")
		w.write(indent)
	}
	w.write("}")
	return nil
}

func (w *jsonWriter) writeComma() {
	if w.Indent != "" {
		w.write(",\n")
	} else {
		w.write(",")
	}
}

func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
	// "If the Any contains a value that has a special JSON mapping,
	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
	//  Otherwise, the value will be converted into a JSON object,
	//  and the "@type" field will be inserted to indicate the actual data type."
	md := m.Descriptor()
	typeURL := m.Get(md.Fields().ByNumber(1)).String()
	rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()

	var m2 protoreflect.Message
	if w.AnyResolver != nil {
		mi, err := w.AnyResolver.Resolve(typeURL)
		if err != nil {
			return err
		}
		m2 = proto.MessageReflect(mi)
	} else {
		mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
		if err != nil {
			return err
		}
		m2 = mt.New()
	}

	if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
		return err
	}

	if wellKnownType(m2.Descriptor().FullName()) == "" {
		return w.marshalMessage(m2, indent, typeURL)
	}

	w.write("{")
	if w.Indent != "" {
		w.write("\n")
	}
	if err := w.marshalTypeURL(indent, typeURL); err != nil {
		return err
	}
	w.writeComma()
	if w.Indent != "" {
		w.write(indent)
		w.write(w.Indent)
		w.write(`"value": `)
	} else {
		w.write(`"value":`)
	}
	if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
		return err
	}
	if w.Indent != "" {
		w.write("\n")
		w.write(indent)
	}
	w.write("}")
	return nil
}

func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
	if w.Indent != "" {
		w.write(indent)
		w.write(w.Indent)
	}
	w.write(`"@type":`)
	if w.Indent != "" {
		w.write(" ")
	}
	b, err := json.Marshal(typeURL)
	if err != nil {
		return err
	}
	w.write(string(b))
	return nil
}

// marshalField writes field description and value to the Writer.
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
	if w.Indent != "" {
		w.write(indent)
		w.write(w.Indent)
	}
	w.write(`"`)
	switch {
	case fd.IsExtension():
		// For message set, use the fname of the message as the extension name.
		name := string(fd.FullName())
		if isMessageSet(fd.ContainingMessage()) {
			name = strings.TrimSuffix(name, ".message_set_extension")
		}

		w.write("[" + name + "]")
	case w.OrigName:
		name := string(fd.Name())
		if fd.Kind() == protoreflect.GroupKind {
			name = string(fd.Message().Name())
		}
		w.write(name)
	default:
		w.write(string(fd.JSONName()))
	}
	w.write(`":`)
	if w.Indent != "" {
		w.write(" ")
	}
	return w.marshalValue(fd, v, indent)
}

func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
	switch {
	case fd.IsList():
		w.write("[")
		comma := ""
		lv := v.List()
		for i := 0; i < lv.Len(); i++ {
			w.write(comma)
			if w.Indent != "" {
				w.write("\n")
				w.write(indent)
				w.write(w.Indent)
				w.write(w.Indent)
			}
			if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
				return err
			}
			comma = ","
		}
		if w.Indent != "" {
			w.write("\n")
			w.write(indent)
			w.write(w.Indent)
		}
		w.write("]")
		return nil
	case fd.IsMap():
		kfd := fd.MapKey()
		vfd := fd.MapValue()
		mv := v.Map()

		// Collect a sorted list of all map keys and values.
		type entry struct{ key, val protoreflect.Value }
		var entries []entry
		mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
			entries = append(entries, entry{k.Value(), v})
			return true
		})
		sort.Slice(entries, func(i, j int) bool {
			switch kfd.Kind() {
			case protoreflect.BoolKind:
				return !entries[i].key.Bool() && entries[j].key.Bool()
			case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
				return entries[i].key.Int() < entries[j].key.Int()
			case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
				return entries[i].key.Uint() < entries[j].key.Uint()
			case protoreflect.StringKind:
				return entries[i].key.String() < entries[j].key.String()
			default:
				panic("invalid kind")
			}
		})

		w.write(`{`)
		comma := ""
		for _, entry := range entries {
			w.write(comma)
			if w.Indent != "" {
				w.write("\n")
				w.write(indent)
				w.write(w.Indent)
				w.write(w.Indent)
			}

			s := fmt.Sprint(entry.key.Interface())
			b, err := json.Marshal(s)
			if err != nil {
				return err
			}
			w.write(string(b))

			w.write(`:`)
			if w.Indent != "" {
				w.write(` `)
			}

			if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
				return err
			}
			comma = ","
		}
		if w.Indent != "" {
			w.write("\n")
			w.write(indent)
			w.write(w.Indent)
		}
		w.write(`}`)
		return nil
	default:
		return w.marshalSingularValue(fd, v, indent)
	}
}

func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
	switch {
	case !v.IsValid():
		w.write("null")
		return nil
	case fd.Message() != nil:
		return w.marshalMessage(v.Message(), indent+w.Indent, "")
|
||||||
|
case fd.Enum() != nil:
|
||||||
|
if fd.Enum().FullName() == "google.protobuf.NullValue" {
|
||||||
|
w.write("null")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
vd := fd.Enum().Values().ByNumber(v.Enum())
|
||||||
|
if vd == nil || w.EnumsAsInts {
|
||||||
|
w.write(strconv.Itoa(int(v.Enum())))
|
||||||
|
} else {
|
||||||
|
w.write(`"` + string(vd.Name()) + `"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
switch v.Interface().(type) {
|
||||||
|
case float32, float64:
|
||||||
|
switch {
|
||||||
|
case math.IsInf(v.Float(), +1):
|
||||||
|
w.write(`"Infinity"`)
|
||||||
|
return nil
|
||||||
|
case math.IsInf(v.Float(), -1):
|
||||||
|
w.write(`"-Infinity"`)
|
||||||
|
return nil
|
||||||
|
case math.IsNaN(v.Float()):
|
||||||
|
w.write(`"NaN"`)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case int64, uint64:
|
||||||
|
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := json.Marshal(v.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.write(string(b))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
|
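The marshaler above is reached through the deprecated jsonpb API. A minimal, standalone usage sketch, not part of this diff: it assumes the well-known wrapper types from google.golang.org/protobuf/types/known/wrapperspb and shows the quoted-string output for 64-bit integers that the fmt.Sprintf branch in marshalSingularValue above produces.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/jsonpb"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        m := &jsonpb.Marshaler{Indent: "  ", OrigName: true}
        // 64-bit integers are emitted as quoted strings, matching the
        // int64/uint64 branch in marshalSingularValue above.
        out, err := m.MarshalToString(wrapperspb.Int64(42))
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // expected: "42"
    }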
@@ -0,0 +1,69 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package jsonpb provides functionality to marshal and unmarshal between a
// protocol buffer message and JSON. It follows the specification at
// https://developers.google.com/protocol-buffers/docs/proto3#json.
//
// Do not rely on the default behavior of the standard encoding/json package
// when called on generated message types as it does not operate correctly.
//
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
// package instead.
package jsonpb

import (
	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/runtime/protoimpl"
)

// AnyResolver takes a type URL, present in an Any message,
// and resolves it into an instance of the associated message.
type AnyResolver interface {
	Resolve(typeURL string) (proto.Message, error)
}

type anyResolver struct{ AnyResolver }

func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
	return r.FindMessageByURL(string(message))
}

func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
	m, err := r.Resolve(url)
	if err != nil {
		return nil, err
	}
	return protoimpl.X.MessageTypeOf(m), nil
}

func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	return protoregistry.GlobalTypes.FindExtensionByName(field)
}

func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}

func wellKnownType(s protoreflect.FullName) string {
	if s.Parent() == "google.protobuf" {
		switch s.Name() {
		case "Empty", "Any",
			"BoolValue", "BytesValue", "StringValue",
			"Int32Value", "UInt32Value", "FloatValue",
			"Int64Value", "UInt64Value", "DoubleValue",
			"Duration", "Timestamp",
			"NullValue", "Struct", "Value", "ListValue":
			return string(s.Name())
		}
	}
	return ""
}

func isMessageSet(md protoreflect.MessageDescriptor) bool {
	ms, ok := md.(interface{ IsMessageSet() bool })
	return ok && ms.IsMessageSet()
}
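The AnyResolver interface added above lets callers resolve custom type URLs when (un)marshaling google.protobuf.Any values. A minimal usage sketch, not part of the vendored code; the stub resolver and its suffix check are illustrative assumptions, and the printed JSON is only the expected shape of the special Any mapping described in marshalAny above.

    package main

    import (
        "fmt"
        "strings"

        "github.com/golang/protobuf/jsonpb"
        "github.com/golang/protobuf/proto"
        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    // stubResolver maps one assumed type URL suffix onto a concrete message;
    // a real resolver would consult its own registry.
    type stubResolver struct{}

    func (stubResolver) Resolve(typeURL string) (proto.Message, error) {
        if strings.HasSuffix(typeURL, "google.protobuf.StringValue") {
            return &wrapperspb.StringValue{}, nil
        }
        return nil, fmt.Errorf("unknown type URL %q", typeURL)
    }

    func main() {
        anyMsg, err := anypb.New(wrapperspb.String("hello"))
        if err != nil {
            panic(err)
        }
        m := &jsonpb.Marshaler{AnyResolver: stubResolver{}}
        out, err := m.MarshalToString(anyMsg)
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // e.g. {"@type":"type.googleapis.com/google.protobuf.StringValue","value":"hello"}
    }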
@@ -27,6 +27,7 @@ import (
 	"net"
 	"strings"
 
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/internal"
@@ -192,7 +193,7 @@ type BuildOptions struct {
 	// server can ignore this field.
 	Authority string
 	// ChannelzParentID is the parent ClientConn's channelz ID.
-	ChannelzParentID int64
+	ChannelzParentID *channelz.Identifier
 	// CustomUserAgent is the custom user agent set on the parent ClientConn.
 	// The balancer should set the same custom user agent if it creates a
 	// ClientConn.
@@ -20,130 +20,178 @@ package grpc
 
 import (
 	"fmt"
+	"strings"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/buffer"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
 )
 
-// scStateUpdate contains the subConn and the new state it changed to.
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen synchronously and in order.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+	cc *ClientConn
+
+	// Since these fields are accessed only from handleXxx() methods which are
+	// synchronized by the watcher goroutine, we do not need a mutex to protect
+	// these fields.
+	balancer        *gracefulswitch.Balancer
+	curBalancerName string
+
+	updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
+	resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
+	closed   *grpcsync.Event   // Indicates if close has been called.
+	done     *grpcsync.Event   // Indicates if close has completed its work.
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
+// is not created until the switchTo() method is invoked.
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
+	ccb := &ccBalancerWrapper{
+		cc:       cc,
+		updateCh: buffer.NewUnbounded(),
+		resultCh: buffer.NewUnbounded(),
+		closed:   grpcsync.NewEvent(),
+		done:     grpcsync.NewEvent(),
+	}
+	go ccb.watcher()
+	ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
+	return ccb
+}
+
+// The following xxxUpdate structs wrap the arguments received as part of the
+// corresponding update. The watcher goroutine uses the 'type' of the update to
+// invoke the appropriate handler routine to handle the update.
+
+type ccStateUpdate struct {
+	ccs *balancer.ClientConnState
+}
+
 type scStateUpdate struct {
 	sc    balancer.SubConn
 	state connectivity.State
 	err   error
 }
 
-// exitIdle contains no data and is just a signal sent on the updateCh in
-// ccBalancerWrapper to instruct the balancer to exit idle.
-type exitIdle struct{}
-
-// ccBalancerWrapper is a wrapper on top of cc for balancers.
-// It implements balancer.ClientConn interface.
-type ccBalancerWrapper struct {
-	cc          *ClientConn
-	balancerMu  sync.Mutex // synchronizes calls to the balancer
-	balancer    balancer.Balancer
-	hasExitIdle bool
-	updateCh    *buffer.Unbounded
-	closed      *grpcsync.Event
-	done        *grpcsync.Event
-
-	mu       sync.Mutex
-	subConns map[*acBalancerWrapper]struct{}
-}
-
-func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
-	ccb := &ccBalancerWrapper{
-		cc:       cc,
-		updateCh: buffer.NewUnbounded(),
-		closed:   grpcsync.NewEvent(),
-		done:     grpcsync.NewEvent(),
-		subConns: make(map[*acBalancerWrapper]struct{}),
-	}
-	go ccb.watcher()
-	ccb.balancer = b.Build(ccb, bopts)
-	_, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler)
-	return ccb
-}
-
-// watcher balancer functions sequentially, so the balancer can be implemented
-// lock-free.
+type exitIdleUpdate struct{}
+
+type resolverErrorUpdate struct {
+	err error
+}
+
+type switchToUpdate struct {
+	name string
+}
+
+type subConnUpdate struct {
+	acbw *acBalancerWrapper
+}
+
+// watcher is a long-running goroutine which reads updates from a channel and
+// invokes corresponding methods on the underlying balancer. It ensures that
+// these methods are invoked in a synchronous fashion. It also ensures that
+// these methods are invoked in the order in which the updates were received.
 func (ccb *ccBalancerWrapper) watcher() {
 	for {
 		select {
-		case t := <-ccb.updateCh.Get():
+		case u := <-ccb.updateCh.Get():
 			ccb.updateCh.Load()
 			if ccb.closed.HasFired() {
 				break
 			}
-			switch u := t.(type) {
+			switch update := u.(type) {
+			case *ccStateUpdate:
+				ccb.handleClientConnStateChange(update.ccs)
 			case *scStateUpdate:
-				ccb.balancerMu.Lock()
-				ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
-				ccb.balancerMu.Unlock()
-			case *acBalancerWrapper:
-				ccb.mu.Lock()
-				if ccb.subConns != nil {
-					delete(ccb.subConns, u)
-					ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
-				}
-				ccb.mu.Unlock()
-			case exitIdle:
-				if ccb.cc.GetState() == connectivity.Idle {
-					if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
-						// We already checked that the balancer implements
-						// ExitIdle before pushing the event to updateCh, but
-						// check conditionally again as defensive programming.
-						ccb.balancerMu.Lock()
-						ei.ExitIdle()
-						ccb.balancerMu.Unlock()
-					}
-				}
+				ccb.handleSubConnStateChange(update)
+			case *exitIdleUpdate:
+				ccb.handleExitIdle()
+			case *resolverErrorUpdate:
+				ccb.handleResolverError(update.err)
+			case *switchToUpdate:
+				ccb.handleSwitchTo(update.name)
+			case *subConnUpdate:
+				ccb.handleRemoveSubConn(update.acbw)
 			default:
-				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
+				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
 			}
 		case <-ccb.closed.Done():
 		}
 
 		if ccb.closed.HasFired() {
-			ccb.balancerMu.Lock()
-			ccb.balancer.Close()
-			ccb.balancerMu.Unlock()
-			ccb.mu.Lock()
-			scs := ccb.subConns
-			ccb.subConns = nil
-			ccb.mu.Unlock()
-			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
-			ccb.done.Fire()
-			// Fire done before removing the addr conns. We can safely unblock
-			// ccb.close and allow the removeAddrConns to happen
-			// asynchronously.
-			for acbw := range scs {
-				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-			}
+			ccb.handleClose()
 			return
 		}
 	}
 }
 
-func (ccb *ccBalancerWrapper) close() {
-	ccb.closed.Fire()
-	<-ccb.done.Done()
-}
-
-func (ccb *ccBalancerWrapper) exitIdle() bool {
-	if !ccb.hasExitIdle {
-		return false
-	}
-	ccb.updateCh.Put(exitIdle{})
-	return true
-}
-
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.
+//
+// Unlike other methods invoked by grpc to push updates to the underlying
+// balancer, this method cannot simply push the update onto the update channel
+// and return. It needs to return the error returned by the underlying balancer
+// back to grpc which propagates that to the resolver.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
+
+	var res interface{}
+	select {
+	case res = <-ccb.resultCh.Get():
+		ccb.resultCh.Load()
+	case <-ccb.closed.Done():
+		// Return early if the balancer wrapper is closed while we are waiting for
+		// the underlying balancer to process a ClientConnState update.
+		return nil
+	}
+	// If the returned error is nil, attempting to type assert to error leads to
+	// panic. So, this needs to handled separately.
+	if res == nil {
+		return nil
+	}
+	return res.(error)
+}
+
+// handleClientConnStateChange handles a ClientConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+//
+// If the addresses specified in the update contain addresses of type "grpclb"
+// and the selected LB policy is not "grpclb", these addresses will be filtered
+// out and ccs will be modified with the updated address list.
+func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
+	if ccb.curBalancerName != grpclbName {
+		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		var addrs []resolver.Address
+		for _, addr := range ccs.ResolverState.Addresses {
+			if addr.Type == resolver.GRPCLB {
+				continue
+			}
+			addrs = append(addrs, addr)
+		}
+		ccs.ResolverState.Addresses = addrs
+	}
+	ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
+}
+
+// updateSubConnState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
 	// When updating addresses for a SubConn, if the address in use is not in
 	// the new addresses, the old ac will be tearDown() and a new ac will be
 	// created. tearDown() generates a state change with Shutdown state, we
@@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
 	})
 }
 
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
-	ccb.balancerMu.Lock()
-	defer ccb.balancerMu.Unlock()
-	return ccb.balancer.UpdateClientConnState(*ccs)
+// handleSubConnStateChange handles a SubConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
+	ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.updateCh.Put(&exitIdleUpdate{})
+}
+
+func (ccb *ccBalancerWrapper) handleExitIdle() {
+	if ccb.cc.GetState() != connectivity.Idle {
+		return
+	}
+	ccb.balancer.ExitIdle()
 }
 
 func (ccb *ccBalancerWrapper) resolverError(err error) {
-	ccb.balancerMu.Lock()
-	defer ccb.balancerMu.Unlock()
+	ccb.updateCh.Put(&resolverErrorUpdate{err: err})
+}
+
+func (ccb *ccBalancerWrapper) handleResolverError(err error) {
 	ccb.balancer.ResolverError(err)
 }
 
+// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
+// LB policy identified by name.
+//
+// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
+// first good update from the name resolver, it determines the LB policy to use
+// and invokes the switchTo() method. Upon receipt of every subsequent update
+// from the name resolver, it invokes this method.
+//
+// the ccBalancerWrapper keeps track of the current LB policy name, and skips
+// the graceful balancer switching process if the name does not change.
+func (ccb *ccBalancerWrapper) switchTo(name string) {
+	ccb.updateCh.Put(&switchToUpdate{name: name})
+}
+
+// handleSwitchTo handles a balancer switch update from the update channel. It
+// calls the SwitchTo() method on the gracefulswitch.Balancer with a
+// balancer.Builder corresponding to name. If no balancer.Builder is registered
+// for the given name, it uses the default LB policy which is "pick_first".
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
+	// TODO: Other languages use case-insensitive balancer registries. We should
+	// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+	if strings.EqualFold(ccb.curBalancerName, name) {
+		return
+	}
+
+	// TODO: Ensure that name is a registered LB policy when we get here.
+	// We currently only validate the `loadBalancingConfig` field. We need to do
+	// the same for the `loadBalancingPolicy` field and reject the service config
+	// if the specified policy is not registered.
+	builder := balancer.Get(name)
+	if builder == nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+		builder = newPickfirstBuilder()
+	} else {
+		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+	}
+
+	if err := ccb.balancer.SwitchTo(builder); err != nil {
+		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+		return
+	}
+	ccb.curBalancerName = builder.Name()
+}
+
+// handleRemoveSucConn handles a request from the underlying balancer to remove
+// a subConn.
+//
+// See comments in RemoveSubConn() for more details.
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
+	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
+
+func (ccb *ccBalancerWrapper) close() {
+	ccb.closed.Fire()
+	<-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) handleClose() {
+	ccb.balancer.Close()
+	ccb.done.Fire()
+}
+
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
 	if len(addrs) <= 0 {
 		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
 	}
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
-	}
 	ac, err := ccb.cc.newAddrConn(addrs, opts)
 	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
 		return nil, err
 	}
 	acbw := &acBalancerWrapper{ac: ac}
 	acbw.ac.mu.Lock()
 	ac.acbw = acbw
 	acbw.ac.mu.Unlock()
-	ccb.subConns[acbw] = struct{}{}
 	return acbw, nil
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	// The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock
-	// during switchBalancer() if the old balancer calls RemoveSubConn() in its
-	// Close().
-	ccb.updateCh.Put(sc)
+	// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
+	// was required to handle the RemoveSubConn() method asynchronously by pushing
	// the update onto the update channel. This was done to avoid a deadlock as
+	// switchBalancer() was holding cc.mu when calling Close() on the old
+	// balancer, which would in turn call RemoveSubConn().
+	//
+	// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
+	// asynchronously is probably not required anymore since the switchTo() method
+	// handles the balancer switch by pushing the update onto the channel.
+	// TODO(easwars): Handle this inline.
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
 }
 
 func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
@@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
 }
 
 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
 	// Update picker before updating state.  Even though the ordering here does
 	// not matter, it can lead to multiple calls of Pick in the common start-up
 	// case where we wait for ready and then perform an RPC.  If the picker is
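The rewritten ccBalancerWrapper above serializes every call into the balancer through a single update channel drained by one watcher goroutine, so the handlers need no locking. A generic, self-contained sketch of that pattern (illustrative names only, not gRPC's internals):

    package main

    import "fmt"

    // Typed updates, mirroring the xxxUpdate structs in the diff above.
    type stateUpdate struct{ state string }
    type closeUpdate struct{}

    type wrapper struct {
        updateCh chan interface{}
        done     chan struct{}
    }

    func newWrapper() *wrapper {
        w := &wrapper{updateCh: make(chan interface{}, 16), done: make(chan struct{})}
        go w.watcher()
        return w
    }

    // watcher applies updates one at a time, in arrival order.
    func (w *wrapper) watcher() {
        for u := range w.updateCh {
            switch u := u.(type) {
            case *stateUpdate:
                fmt.Println("state:", u.state)
            case closeUpdate:
                close(w.done)
                return
            }
        }
    }

    func main() {
        w := newWrapper()
        w.updateCh <- &stateUpdate{state: "CONNECTING"}
        w.updateCh <- &stateUpdate{state: "READY"}
        w.updateCh <- closeUpdate{}
        <-w.done
    }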
@@ -0,0 +1,36 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package channelz exports internals of the channelz implementation as required
// by other gRPC packages.
//
// The implementation of the channelz spec as defined in
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
// Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.
package channelz

import "google.golang.org/grpc/internal/channelz"

// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier = channelz.Identifier
@@ -159,23 +159,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		}
 	}()
 
-	if channelz.IsOn() {
-		if cc.dopts.channelzParentID != 0 {
-			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
-			channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
-				Desc:     "Channel Created",
-				Severity: channelz.CtInfo,
-				Parent: &channelz.TraceEventDesc{
-					Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
-					Severity: channelz.CtInfo,
-				},
-			})
-		} else {
-			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
-			channelz.Info(logger, cc.channelzID, "Channel Created")
-		}
-		cc.csMgr.channelzID = cc.channelzID
-	}
+	pid := cc.dopts.channelzParentID
+	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
+	ted := &channelz.TraceEventDesc{
+		Desc:     "Channel created",
+		Severity: channelz.CtInfo,
+	}
+	if cc.dopts.channelzParentID != nil {
+		ted.Parent = &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		}
+	}
+	channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
+	cc.csMgr.channelzID = cc.channelzID
 
 	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
 		return nil, errNoTransportSecurity
@@ -281,7 +278,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
 		credsClone = creds.Clone()
 	}
-	cc.balancerBuildOpts = balancer.BuildOptions{
+	cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
 		DialCreds:        credsClone,
 		CredsBundle:      cc.dopts.copts.CredsBundle,
 		Dialer:           cc.dopts.copts.Dialer,
@@ -289,7 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		CustomUserAgent:  cc.dopts.copts.UserAgent,
 		ChannelzParentID: cc.channelzID,
 		Target:           cc.parsedTarget,
-	}
+	})
 
 	// Build the resolver.
 	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
@@ -398,7 +395,7 @@ type connectivityStateManager struct {
 	mu         sync.Mutex
 	state      connectivity.State
 	notifyChan chan struct{}
-	channelzID int64
+	channelzID *channelz.Identifier
 }
 
 // updateState updates the connectivity.State of ClientConn.
@@ -464,34 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil)
 // handshakes. It also handles errors on established connections by
 // re-resolving the name and reconnecting.
 type ClientConn struct {
-	ctx    context.Context
-	cancel context.CancelFunc
-
-	target       string
-	parsedTarget resolver.Target
-	authority    string
-	dopts        dialOptions
+	ctx    context.Context    // Initialized using the background context at dial time.
+	cancel context.CancelFunc // Cancelled on close.
+
+	// The following are initialized at dial time, and are read-only after that.
+	target          string               // User's dial target.
+	parsedTarget    resolver.Target      // See parseTargetAndFindResolver().
+	authority       string               // See determineAuthority().
+	dopts           dialOptions          // Default and user specified dial options.
+	channelzID      *channelz.Identifier // Channelz identifier for the channel.
+	balancerWrapper *ccBalancerWrapper   // Uses gracefulswitch.balancer underneath.
+
+	// The following provide their own synchronization, and therefore don't
+	// require cc.mu to be held to access them.
 	csMgr              *connectivityStateManager
-
-	balancerBuildOpts balancer.BuildOptions
-	blockingpicker    *pickerWrapper
-
+	blockingpicker     *pickerWrapper
 	safeConfigSelector iresolver.SafeConfigSelector
+	czData             *channelzData
+	retryThrottler     atomic.Value // Updated from service config.
 
-	mu              sync.RWMutex
-	resolverWrapper *ccResolverWrapper
-	sc              *ServiceConfig
-	conns           map[*addrConn]struct{}
-	// Keepalive parameter can be updated if a GoAway is received.
-	mkp             keepalive.ClientParameters
-	curBalancerName string
-	balancerWrapper *ccBalancerWrapper
-	retryThrottler  atomic.Value
-
+	// firstResolveEvent is used to track whether the name resolver sent us at
+	// least one update. RPCs block on this event.
 	firstResolveEvent *grpcsync.Event
 
-	channelzID int64 // channelz unique identification number
-	czData     *channelzData
+	// mu protects the following fields.
+	// TODO: split mu so the same mutex isn't used for everything.
+	mu              sync.RWMutex
+	resolverWrapper *ccResolverWrapper         // Initialized in Dial; cleared in Close.
+	sc              *ServiceConfig             // Latest service config received from the resolver.
+	conns           map[*addrConn]struct{}     // Set to nil on close.
+	mkp             keepalive.ClientParameters // May be updated upon receipt of a GoAway.
 
 	lceMu               sync.Mutex // protects lastConnectionError
 	lastConnectionError error
@@ -536,14 +535,7 @@ func (cc *ClientConn) GetState() connectivity.State {
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a later
 // release.
 func (cc *ClientConn) Connect() {
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() {
-		return
-	}
-	for ac := range cc.conns {
-		go ac.connect()
-	}
+	cc.balancerWrapper.exitIdle()
 }
 
 func (cc *ClientConn) scWatcher() {
@@ -623,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 		// with the new addresses.
 		cc.maybeApplyDefaultServiceConfig(nil)
 
-		if cc.balancerWrapper != nil {
-			cc.balancerWrapper.resolverError(err)
-		}
+		cc.balancerWrapper.resolverError(err)
 
 		// No addresses are valid with err set; return early.
 		cc.mu.Unlock()
@@ -653,16 +643,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 			cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
 		} else {
 			ret = balancer.ErrBadResolverState
-			if cc.balancerWrapper == nil {
-				var err error
-				if s.ServiceConfig.Err != nil {
-					err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
-				} else {
-					err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
-				}
-				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc})
-				cc.blockingpicker.updatePicker(base.NewErrPicker(err))
-				cc.csMgr.updateState(connectivity.TransientFailure)
+			if cc.sc == nil {
+				// Apply the failing LB only if we haven't received valid service config
+				// from the name resolver in the past.
+				cc.applyFailingLB(s.ServiceConfig)
 				cc.mu.Unlock()
 				return ret
 			}
@@ -670,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 	}
 
 	var balCfg serviceconfig.LoadBalancingConfig
-	if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+	if cc.sc != nil && cc.sc.lbConfig != nil {
 		balCfg = cc.sc.lbConfig.cfg
 	}
-
-	cbn := cc.curBalancerName
 	bw := cc.balancerWrapper
 	cc.mu.Unlock()
-	if cbn != grpclbName {
-		// Filter any grpclb addresses since we don't have the grpclb balancer.
-		for i := 0; i < len(s.Addresses); {
-			if s.Addresses[i].Type == resolver.GRPCLB {
-				copy(s.Addresses[i:], s.Addresses[i+1:])
-				s.Addresses = s.Addresses[:len(s.Addresses)-1]
-				continue
-			}
-			i++
-		}
-	}
+
 	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
 	if ret == nil {
 		ret = uccsErr // prefer ErrBadResolver state since any other error is
@@ -696,56 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 	return ret
 }
 
-// switchBalancer starts the switching from current balancer to the balancer
-// with the given name.
-//
-// It will NOT send the current address list to the new balancer. If needed,
-// caller of this function should send address list to the new balancer after
-// this function returns.
+// applyFailingLB is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
 //
 // Caller must hold cc.mu.
-func (cc *ClientConn) switchBalancer(name string) {
-	if strings.EqualFold(cc.curBalancerName, name) {
-		return
-	}
-
-	channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
-	if cc.dopts.balancerBuilder != nil {
-		channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
-		return
-	}
-	if cc.balancerWrapper != nil {
-		// Don't hold cc.mu while closing the balancers. The balancers may call
-		// methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex
-		// would cause a deadlock in that case.
-		cc.mu.Unlock()
-		cc.balancerWrapper.close()
-		cc.mu.Lock()
-	}
-
-	builder := balancer.Get(name)
-	if builder == nil {
-		channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
-		channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
-		builder = newPickfirstBuilder()
+func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
+	var err error
+	if sc.Err != nil {
+		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
 	} else {
-		channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
+		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
 	}
-
-	cc.curBalancerName = builder.Name()
-	cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+	cc.csMgr.updateState(connectivity.TransientFailure)
 }
 
 func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
-	cc.mu.Lock()
-	if cc.conns == nil {
-		cc.mu.Unlock()
-		return
-	}
-	// TODO(bar switching) send updates to all balancer wrappers when balancer
-	// gracefully switching is supported.
-	cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
-	cc.mu.Unlock()
+	cc.balancerWrapper.updateSubConnState(sc, s, err)
 }
 
 // newAddrConn creates an addrConn for addrs and adds it to cc.conns.
@@ -768,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
 		cc.mu.Unlock()
 		return nil, ErrClientConnClosing
 	}
-	if channelz.IsOn() {
-		ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
-		channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
-			Desc:     "Subchannel Created",
-			Severity: channelz.CtInfo,
-			Parent: &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
-				Severity: channelz.CtInfo,
-			},
-		})
+
+	var err error
+	ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
+	if err != nil {
+		return nil, err
 	}
+	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+		Desc:     "Subchannel created",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		},
+	})
+
 	cc.conns[ac] = struct{}{}
 	cc.mu.Unlock()
 	return ac, nil
@@ -853,16 +801,31 @@ func (ac *addrConn) connect() error {
 	return nil
 }
 
+func equalAddresses(a, b []resolver.Address) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if !v.Equal(b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
 // tryUpdateAddrs tries to update ac.addrs with the new addresses list.
 //
-// If ac is Connecting, it returns false. The caller should tear down the ac and
-// create a new one. Note that the backoff will be reset when this happens.
-//
 // If ac is TransientFailure, it updates ac.addrs and returns true. The updated
 // addresses will be picked up by retry in the next iteration after backoff.
 //
 // If ac is Shutdown or Idle, it updates ac.addrs and returns true.
 //
+// If the addresses is the same as the old list, it does nothing and returns
+// true.
+//
+// If ac is Connecting, it returns false. The caller should tear down the ac and
+// create a new one. Note that the backoff will be reset when this happens.
+//
 // If ac is Ready, it checks whether current connected address of ac is in the
 // new addrs list.
 //  - If true, it updates ac.addrs and returns true. The ac will keep using
@@ -879,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 		return true
 	}
 
+	if equalAddresses(ac.addrs, addrs) {
+		return true
+	}
+
 	if ac.state == connectivity.Connecting {
 		return false
 	}
@@ -959,14 +926,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
 }
 
 func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+	return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
 		Ctx:            ctx,
 		FullMethodName: method,
 	})
-	if err != nil {
-		return nil, nil, toRPCErr(err)
-	}
-	return t, done, nil
 }
 
 func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
@@ -991,9 +954,6 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
 		cc.retryThrottler.Store((*retryThrottler)(nil))
 	}
 
-	if cc.dopts.balancerBuilder == nil {
-		// Only look at balancer types and switch balancer if balancer dial
-		// option is not set.
 		var newBalancerName string
 		if cc.sc != nil && cc.sc.lbConfig != nil {
 			newBalancerName = cc.sc.lbConfig.name
@@ -1013,13 +973,7 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
 				newBalancerName = PickFirstBalancerName
 			}
 		}
-		cc.switchBalancer(newBalancerName)
-	} else if cc.balancerWrapper == nil {
-		// Balancer dial option was set, and this is the first time handling
-		// resolved addresses. Build a balancer with dopts.balancerBuilder.
-		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
-	}
+		cc.balancerWrapper.switchTo(newBalancerName)
 }
 
 func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
@@ -1070,11 +1024,11 @@ func (cc *ClientConn) Close() error {
 	rWrapper := cc.resolverWrapper
 	cc.resolverWrapper = nil
 	bWrapper := cc.balancerWrapper
-	cc.balancerWrapper = nil
 	cc.mu.Unlock()
 
+	// The order of closing matters here since the balancer wrapper assumes the
+	// picker is closed before it is closed.
 	cc.blockingpicker.close()
-
 	if bWrapper != nil {
 		bWrapper.close()
 	}
@@ -1085,22 +1039,22 @@ func (cc *ClientConn) Close() error {
 	for ac := range conns {
 		ac.tearDown(ErrClientConnClosing)
 	}
-	if channelz.IsOn() {
-		ted := &channelz.TraceEventDesc{
-			Desc:     "Channel Deleted",
-			Severity: channelz.CtInfo,
-		}
-		if cc.dopts.channelzParentID != 0 {
-			ted.Parent = &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
-				Severity: channelz.CtInfo,
-			}
-		}
-		channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
-		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity being deleted, and thus prevent it from being deleted right away.
-		channelz.RemoveEntry(cc.channelzID)
+	ted := &channelz.TraceEventDesc{
+		Desc:     "Channel deleted",
+		Severity: channelz.CtInfo,
 	}
+	if cc.dopts.channelzParentID != nil {
+		ted.Parent = &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		}
+	}
+	channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from being
+	// deleted right away.
+	channelz.RemoveEntry(cc.channelzID)
+
 	return nil
 }
 
@@ -1130,7 +1084,7 @@ type addrConn struct {
 	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
 	resetBackoff chan struct{}
 
-	channelzID int64 // channelz unique identification number.
+	channelzID *channelz.Identifier
 	czData     *channelzData
 }
 
@@ -1284,6 +1238,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 		ac.mu.Lock()
 		defer ac.mu.Unlock()
 		defer connClosed.Fire()
+		defer hcancel()
 		if !hcStarted || hctx.Err() != nil {
 			// We didn't start the health check or set the state to READY, so
 			// no need to do anything else here.
@@ -1294,7 +1249,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 			// state, since there may be a new transport in this addrConn.
 			return
 		}
-		hcancel()
 		ac.transport = nil
 		// Refresh the name resolver
 		ac.cc.resolveNow(resolver.ResolveNowOptions{})
@@ -1312,14 +1266,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 
 	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
 	defer cancel()
-	if channelz.IsOn() {
-		copts.ChannelzParentID = ac.channelzID
-	}
+	copts.ChannelzParentID = ac.channelzID
 
 	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
 	if err != nil {
 		// newTr is either nil, or closed.
-		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err)
+		hcancel()
+		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
 		return err
 	}
 
@@ -1332,7 +1285,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 		newTr.Close(transport.ErrConnClosing)
 		if connectCtx.Err() == context.DeadlineExceeded {
 			err := errors.New("failed to receive server preface within timeout")
-			channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err)
+			channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
 			return err
 		}
 		return nil
@@ -1497,19 +1450,18 @@ func (ac *addrConn) tearDown(err error) {
 		curTr.GracefulClose()
 		ac.mu.Lock()
 	}
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
-			Desc:     "Subchannel Deleted",
-			Severity: channelz.CtInfo,
-			Parent: &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
-				Severity: channelz.CtInfo,
-			},
-		})
-		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity being deleted, and thus prevent it from being deleted right away.
-		channelz.RemoveEntry(ac.channelzID)
-	}
+	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+		Desc:     "Subchannel deleted",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		},
+	})
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from
+	// being deleted right away.
+	channelz.RemoveEntry(ac.channelzID)
+
 	ac.mu.Unlock()
 }
@ -20,12 +20,11 @@ package grpc
 
 import (
 	"context"
-	"fmt"
 	"net"
 	"time"
 
 	"google.golang.org/grpc/backoff"
-	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/internal"

@ -55,9 +54,7 @@ type dialOptions struct {
 	authority   string
 	copts       transport.ConnectOptions
 	callOptions []CallOption
-	// This is used by WithBalancerName dial option.
-	balancerBuilder      balancer.Builder
-	channelzParentID     int64
+	channelzParentID *channelz.Identifier
 	disableServiceConfig bool
 	disableRetry         bool
 	disableHealthCheck   bool

@ -195,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption {
 	})
 }
 
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
-// instead. Will be removed in a future 1.x release.
-func WithBalancerName(balancerName string) DialOption {
-	builder := balancer.Get(balancerName)
-	if builder == nil {
-		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
-	}
-	return newFuncDialOption(func(o *dialOptions) {
-		o.balancerBuilder = builder
-	})
-}
-
 // WithServiceConfig returns a DialOption which has a channel to read the
 // service configuration.
 //

@ -304,8 +282,8 @@ func WithReturnConnectionError() DialOption {
 // WithCredentialsBundle or WithPerRPCCredentials) which require transport
 // security is incompatible and will cause grpc.Dial() to fail.
 //
-// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
-// Will be supported throughout 1.x.
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
+// instead. Will be supported throughout 1.x.
 func WithInsecure() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.TransportCredentials = insecure.NewCredentials()

@ -498,7 +476,7 @@ func WithAuthority(a string) DialOption {
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
-func WithChannelzParentID(id int64) DialOption {
+func WithChannelzParentID(id *channelz.Identifier) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.channelzParentID = id
 	})
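Since the deprecated WithBalancerName dial option is removed in the hunk above, any downstream caller still using it has to move to the replacement named in its old doc comment, WithDefaultServiceConfig. A minimal, hedged sketch of that migration in caller code (the target address and the choice of round_robin are illustrative, not taken from this commit):

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // Previously: grpc.Dial(target, grpc.WithBalancerName("round_robin")).
        // The documented replacement selects the LB policy via the default service config.
        conn, err := grpc.Dial(
            "localhost:50051", // illustrative target
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
        )
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }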
@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec)
 // more details.
 //
 // NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
+// an init() function), and is not thread-safe. If multiple Codecs are
 // registered with the same name, the one registered last will take effect.
 func RegisterCodec(codec Codec) {
 	if codec == nil {
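The corrected comment describes the usual registration pattern for codecs: register once at init time, last registration for a given name wins. A short sketch of a custom codec registration under that rule (the JSON codec and the name "json" are illustrative, not part of this commit):

    package jsoncodec

    import (
        "encoding/json"

        "google.golang.org/grpc/encoding"
    )

    // jsonCodec is an illustrative Codec implementation.
    type jsonCodec struct{}

    func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
    func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
    func (jsonCodec) Name() string                               { return "json" }

    // Register at init time only, as the doc comment above requires; RegisterCodec
    // is not thread-safe and the last registration for "json" would take effect.
    func init() {
        encoding.RegisterCodec(jsonCodec{})
    }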
382 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go generated vendored Normal file
@ -0,0 +1,382 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package gracefulswitch implements a graceful switch load balancer.
package gracefulswitch

import (
    "errors"
    "fmt"
    "sync"

    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/balancer/base"
    "google.golang.org/grpc/connectivity"
    "google.golang.org/grpc/resolver"
)

var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
var _ balancer.Balancer = (*Balancer)(nil)

// NewBalancer returns a graceful switch Balancer.
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
    return &Balancer{
        cc:    cc,
        bOpts: opts,
    }
}

// Balancer is a utility to gracefully switch from one balancer to
// a new balancer. It implements the balancer.Balancer interface.
type Balancer struct {
    bOpts balancer.BuildOptions
    cc    balancer.ClientConn

    // mu protects the following fields and all fields within balancerCurrent
    // and balancerPending. mu does not need to be held when calling into the
    // child balancers, as all calls into these children happen only as a direct
    // result of a call into the gracefulSwitchBalancer, which are also
    // guaranteed to be synchronous. There is one exception: an UpdateState call
    // from a child balancer when current and pending are populated can lead to
    // calling Close() on the current. To prevent that racing with an
    // UpdateSubConnState from the channel, we hold currentMu during Close and
    // UpdateSubConnState calls.
    mu              sync.Mutex
    balancerCurrent *balancerWrapper
    balancerPending *balancerWrapper
    closed          bool // set to true when this balancer is closed

    // currentMu must be locked before mu. This mutex guards against this
    // sequence of events: UpdateSubConnState() called, finds the
    // balancerCurrent, gives up lock, updateState comes in, causes Close() on
    // balancerCurrent before the UpdateSubConnState is called on the
    // balancerCurrent.
    currentMu sync.Mutex
}

// swap swaps out the current lb with the pending lb and updates the ClientConn.
// The caller must hold gsb.mu.
func (gsb *Balancer) swap() {
    gsb.cc.UpdateState(gsb.balancerPending.lastState)
    cur := gsb.balancerCurrent
    gsb.balancerCurrent = gsb.balancerPending
    gsb.balancerPending = nil
    go func() {
        gsb.currentMu.Lock()
        defer gsb.currentMu.Unlock()
        cur.Close()
    }()
}

// Helper function that checks if the balancer passed in is current or pending.
// The caller must hold gsb.mu.
func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
    return bw == gsb.balancerCurrent || bw == gsb.balancerPending
}

// SwitchTo initializes the graceful switch process, which completes based on
// connectivity state changes on the current/pending balancer. Thus, the switch
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
    gsb.mu.Lock()
    if gsb.closed {
        gsb.mu.Unlock()
        return errBalancerClosed
    }
    bw := &balancerWrapper{
        gsb: gsb,
        lastState: balancer.State{
            ConnectivityState: connectivity.Connecting,
            Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
        },
        subconns: make(map[balancer.SubConn]bool),
    }
    balToClose := gsb.balancerPending // nil if there is no pending balancer
    if gsb.balancerCurrent == nil {
        gsb.balancerCurrent = bw
    } else {
        gsb.balancerPending = bw
    }
    gsb.mu.Unlock()
    balToClose.Close()
    // This function takes a builder instead of a balancer because builder.Build
    // can call back inline, and this utility needs to handle the callbacks.
    newBalancer := builder.Build(bw, gsb.bOpts)
    if newBalancer == nil {
        // This is illegal and should never happen; we clear the balancerWrapper
        // we were constructing if it happens to avoid a potential panic.
        gsb.mu.Lock()
        if gsb.balancerPending != nil {
            gsb.balancerPending = nil
        } else {
            gsb.balancerCurrent = nil
        }
        gsb.mu.Unlock()
        return balancer.ErrBadResolverState
    }

    // This write doesn't need to take gsb.mu because this field never gets read
    // or written to on any calls from the current or pending. Calls from grpc
    // to this balancer are guaranteed to be called synchronously, so this
    // bw.Balancer field will never be forwarded to until this SwitchTo()
    // function returns.
    bw.Balancer = newBalancer
    return nil
}

// Returns nil if the graceful switch balancer is closed.
func (gsb *Balancer) latestBalancer() *balancerWrapper {
    gsb.mu.Lock()
    defer gsb.mu.Unlock()
    if gsb.balancerPending != nil {
        return gsb.balancerPending
    }
    return gsb.balancerCurrent
}

// UpdateClientConnState forwards the update to the latest balancer created.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
    // The resolver data is only relevant to the most recent LB Policy.
    balToUpdate := gsb.latestBalancer()
    if balToUpdate == nil {
        return errBalancerClosed
    }
    // Perform this call without gsb.mu to prevent deadlocks if the child calls
    // back into the channel. The latest balancer can never be closed during a
    // call from the channel, even without gsb.mu held.
    return balToUpdate.UpdateClientConnState(state)
}

// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
    // The resolver data is only relevant to the most recent LB Policy.
    balToUpdate := gsb.latestBalancer()
    if balToUpdate == nil {
        return
    }
    // Perform this call without gsb.mu to prevent deadlocks if the child calls
    // back into the channel. The latest balancer can never be closed during a
    // call from the channel, even without gsb.mu held.
    balToUpdate.ResolverError(err)
}

// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, the subConns are
// re-connected to manually.
func (gsb *Balancer) ExitIdle() {
    balToUpdate := gsb.latestBalancer()
    if balToUpdate == nil {
        return
    }
    // There is no need to protect this read with a mutex, as the write to the
    // Balancer field happens in SwitchTo, which completes before this can be
    // called.
    if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
        ei.ExitIdle()
        return
    }
    for sc := range balToUpdate.subconns {
        sc.Connect()
    }
}

// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
    gsb.currentMu.Lock()
    defer gsb.currentMu.Unlock()
    gsb.mu.Lock()
    // Forward update to the appropriate child. Even if there is a pending
    // balancer, the current balancer should continue to get SubConn updates to
    // maintain the proper state while the pending is still connecting.
    var balToUpdate *balancerWrapper
    if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
        balToUpdate = gsb.balancerCurrent
    } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
        balToUpdate = gsb.balancerPending
    }
    gsb.mu.Unlock()
    if balToUpdate == nil {
        // SubConn belonged to a stale lb policy that has not yet fully closed,
        // or the balancer was already closed.
        return
    }
    balToUpdate.UpdateSubConnState(sc, state)
}

// Close closes any active child balancers.
func (gsb *Balancer) Close() {
    gsb.mu.Lock()
    gsb.closed = true
    currentBalancerToClose := gsb.balancerCurrent
    gsb.balancerCurrent = nil
    pendingBalancerToClose := gsb.balancerPending
    gsb.balancerPending = nil
    gsb.mu.Unlock()

    currentBalancerToClose.Close()
    pendingBalancerToClose.Close()
}

// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
// methods to help cleanup SubConns created by the wrapped balancer.
//
// It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove
// SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
    balancer.Balancer
    gsb *Balancer

    lastState balancer.State
    subconns  map[balancer.SubConn]bool // subconns created by this balancer
}

func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
    if state.ConnectivityState == connectivity.Shutdown {
        bw.gsb.mu.Lock()
        delete(bw.subconns, sc)
        bw.gsb.mu.Unlock()
    }
    // There is no need to protect this read with a mutex, as the write to the
    // Balancer field happens in SwitchTo, which completes before this can be
    // called.
    bw.Balancer.UpdateSubConnState(sc, state)
}

// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
    // before Close is called.
    if bw == nil {
        return
    }
    // There is no need to protect this read with a mutex, as Close() is
    // impossible to be called concurrently with the write in SwitchTo(). The
    // callsites of Close() for this balancer in Graceful Switch Balancer will
    // never be called until SwitchTo() returns.
    bw.Balancer.Close()
    bw.gsb.mu.Lock()
    for sc := range bw.subconns {
        bw.gsb.cc.RemoveSubConn(sc)
    }
    bw.gsb.mu.Unlock()
}

func (bw *balancerWrapper) UpdateState(state balancer.State) {
    // Hold the mutex for this entire call to ensure it cannot occur
    // concurrently with other updateState() calls. This causes updates to
    // lastState and calls to cc.UpdateState to happen atomically.
    bw.gsb.mu.Lock()
    defer bw.gsb.mu.Unlock()
    bw.lastState = state

    if !bw.gsb.balancerCurrentOrPending(bw) {
        return
    }

    if bw == bw.gsb.balancerCurrent {
        // In the case that the current balancer exits READY, and there is a pending
        // balancer, you can forward the pending balancer's cached State up to
        // ClientConn and swap the pending into the current. This is because there
        // is no reason to gracefully switch from and keep using the old policy as
        // the ClientConn is not connected to any backends.
        if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
            bw.gsb.swap()
            return
        }
        // Even if there is a pending balancer waiting to be gracefully switched to,
        // continue to forward current balancer updates to the Client Conn. Ignoring
        // state + picker from the current would cause undefined behavior/cause the
        // system to behave incorrectly from the current LB policies perspective.
        // Also, the current LB is still being used by grpc to choose SubConns per
        // RPC, and thus should use the most updated form of the current balancer.
        bw.gsb.cc.UpdateState(state)
        return
    }
    // This method is now dealing with a state update from the pending balancer.
    // If the current balancer is currently in a state other than READY, the new
    // policy can be swapped into place immediately. This is because there is no
    // reason to gracefully switch from and keep using the old policy as the
    // ClientConn is not connected to any backends.
    if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
        bw.gsb.swap()
    }
}

func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) {
        bw.gsb.mu.Unlock()
        return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
    }
    bw.gsb.mu.Unlock()

    sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
    if err != nil {
        return nil, err
    }
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
        bw.gsb.cc.RemoveSubConn(sc)
        bw.gsb.mu.Unlock()
        return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
    }
    bw.subconns[sc] = true
    bw.gsb.mu.Unlock()
    return sc, nil
}

func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
    // Ignore ResolveNow requests from anything other than the most recent
    // balancer, because older balancers were already removed from the config.
    if bw != bw.gsb.latestBalancer() {
        return
    }
    bw.gsb.cc.ResolveNow(opts)
}

func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) {
        bw.gsb.mu.Unlock()
        return
    }
    bw.gsb.mu.Unlock()
    bw.gsb.cc.RemoveSubConn(sc)
}

func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) {
        bw.gsb.mu.Unlock()
        return
    }
    bw.gsb.mu.Unlock()
    bw.gsb.cc.UpdateAddresses(sc, addrs)
}

func (bw *balancerWrapper) Target() string {
    return bw.gsb.cc.Target()
}
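The gracefulswitch package is internal to gRPC, but its intended call pattern follows from the API above. A hedged sketch of how a parent LB policy inside grpc-go might drive it, assuming cc and opts come from that policy's own Build call (the helper name and policy names are illustrative):

    package example

    import (
        "google.golang.org/grpc/balancer"
        _ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
        "google.golang.org/grpc/internal/balancer/gracefulswitch"
        "google.golang.org/grpc/resolver"
    )

    // switchPolicies is a hypothetical helper; the package is internal, so only
    // code inside grpc-go can actually import it like this.
    func switchPolicies(cc balancer.ClientConn, opts balancer.BuildOptions, rs resolver.State) error {
        gsb := gracefulswitch.NewBalancer(cc, opts)

        // The first SwitchTo installs the initial child policy.
        if err := gsb.SwitchTo(balancer.Get("pick_first")); err != nil {
            return err
        }
        // A later SwitchTo builds the new child as "pending"; it only replaces
        // the current child once it can serve (or the current child leaves
        // READY), which is the graceful part of the switch.
        if err := gsb.SwitchTo(balancer.Get("round_robin")); err != nil {
            return err
        }
        // Resolver data is always forwarded to the most recent child.
        return gsb.UpdateClientConnState(balancer.ClientConnState{ResolverState: rs})
    }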
@ -31,7 +31,7 @@ import (
 // Logger is the global binary logger. It can be used to get binary logger for
 // each method.
 type Logger interface {
-	getMethodLogger(methodName string) *MethodLogger
+	GetMethodLogger(methodName string) MethodLogger
 }
 
 // binLogger is the global binary logger for the binary. One of this should be

@ -49,17 +49,24 @@ func SetLogger(l Logger) {
 	binLogger = l
 }
 
+// GetLogger gets the binarg logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+	return binLogger
+}
+
 // GetMethodLogger returns the methodLogger for the given methodName.
 //
 // methodName should be in the format of "/service/method".
 //
 // Each methodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func GetMethodLogger(methodName string) *MethodLogger {
+func GetMethodLogger(methodName string) MethodLogger {
 	if binLogger == nil {
 		return nil
 	}
-	return binLogger.getMethodLogger(methodName)
+	return binLogger.GetMethodLogger(methodName)
 }
 
 func init() {

@ -68,17 +75,29 @@ func init() {
 		binLogger = NewLoggerFromConfigString(configStr)
 	}
 
-type methodLoggerConfig struct {
+// MethodLoggerConfig contains the setting for logging behavior of a method
+// logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
 	// Max length of header and message.
-	hdr, msg uint64
+	Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+	All      *MethodLoggerConfig
+	Services map[string]*MethodLoggerConfig
+	Methods  map[string]*MethodLoggerConfig
+
+	Blacklist map[string]struct{}
 }
 
 type logger struct {
-	all      *methodLoggerConfig
-	services map[string]*methodLoggerConfig
-	methods  map[string]*methodLoggerConfig
-
-	blacklist map[string]struct{}
+	config LoggerConfig
+}
+
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+	return &logger{config: config}
 }
 
 // newEmptyLogger creates an empty logger. The map fields need to be filled in

@ -88,57 +107,57 @@ func newEmptyLogger() *logger {
 }
 
 // Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
-	if l.all != nil {
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+	if l.config.All != nil {
 		return fmt.Errorf("conflicting global rules found")
 	}
-	l.all = ml
+	l.config.All = ml
 	return nil
 }
 
 // Set method logger for "service/*".
 //
 // New methodLogger with same service overrides the old one.
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
-	if _, ok := l.services[service]; ok {
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Services[service]; ok {
 		return fmt.Errorf("conflicting service rules for service %v found", service)
 	}
-	if l.services == nil {
-		l.services = make(map[string]*methodLoggerConfig)
+	if l.config.Services == nil {
+		l.config.Services = make(map[string]*MethodLoggerConfig)
 	}
-	l.services[service] = ml
+	l.config.Services[service] = ml
 	return nil
 }
 
 // Set method logger for "service/method".
 //
 // New methodLogger with same method overrides the old one.
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
-	if _, ok := l.blacklist[method]; ok {
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Blacklist[method]; ok {
 		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
-	if _, ok := l.methods[method]; ok {
+	if _, ok := l.config.Methods[method]; ok {
 		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
-	if l.methods == nil {
-		l.methods = make(map[string]*methodLoggerConfig)
+	if l.config.Methods == nil {
+		l.config.Methods = make(map[string]*MethodLoggerConfig)
 	}
-	l.methods[method] = ml
+	l.config.Methods[method] = ml
 	return nil
 }
 
 // Set blacklist method for "-service/method".
 func (l *logger) setBlacklist(method string) error {
-	if _, ok := l.blacklist[method]; ok {
+	if _, ok := l.config.Blacklist[method]; ok {
 		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
-	if _, ok := l.methods[method]; ok {
+	if _, ok := l.config.Methods[method]; ok {
 		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
-	if l.blacklist == nil {
-		l.blacklist = make(map[string]struct{})
+	if l.config.Blacklist == nil {
+		l.config.Blacklist = make(map[string]struct{})
 	}
-	l.blacklist[method] = struct{}{}
+	l.config.Blacklist[method] = struct{}{}
 	return nil
 }
 

@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error {
 //
 // Each methodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func (l *logger) getMethodLogger(methodName string) *MethodLogger {
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
 	s, m, err := grpcutil.ParseMethod(methodName)
 	if err != nil {
 		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
 		return nil
 	}
-	if ml, ok := l.methods[s+"/"+m]; ok {
-		return newMethodLogger(ml.hdr, ml.msg)
+	if ml, ok := l.config.Methods[s+"/"+m]; ok {
+		return newMethodLogger(ml.Header, ml.Message)
 	}
-	if _, ok := l.blacklist[s+"/"+m]; ok {
+	if _, ok := l.config.Blacklist[s+"/"+m]; ok {
 		return nil
 	}
-	if ml, ok := l.services[s]; ok {
-		return newMethodLogger(ml.hdr, ml.msg)
+	if ml, ok := l.config.Services[s]; ok {
+		return newMethodLogger(ml.Header, ml.Message)
 	}
-	if l.all == nil {
+	if l.config.All == nil {
 		return nil
 	}
-	return newMethodLogger(l.all.hdr, l.all.msg)
+	return newMethodLogger(l.config.All.Header, l.config.All.Message)
 }
 

@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 		if err != nil {
 			return fmt.Errorf("invalid config: %q, %v", config, err)
 		}
-		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 		return nil

@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 			return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
 		}
 		if m == "*" {
-			if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 				return fmt.Errorf("invalid config: %v", err)
 			}
 		} else {
-			if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 				return fmt.Errorf("invalid config: %v", err)
 			}
 		}
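The net effect of these binarylog hunks is that the per-method settings are now exported and a logger can be built programmatically instead of only from the environment config string. A minimal sketch under that reading (the service and method names are illustrative, and the package is still internal to grpc-go):

    package example

    import (
        "google.golang.org/grpc/internal/binarylog"
    )

    // buildBinLogger builds a binary logger that logs every method of one
    // service, skips one noisy method, and truncates headers/messages to the
    // given byte limits. A sketch of the exported config surface added above.
    func buildBinLogger() binarylog.Logger {
        return binarylog.NewLoggerFromConfig(binarylog.LoggerConfig{
            Services: map[string]*binarylog.MethodLoggerConfig{
                "echo.Echo": {Header: 1024, Message: 4096}, // illustrative service
            },
            Blacklist: map[string]struct{}{
                "echo.Echo/HealthCheck": {}, // illustrative method to exclude
            },
        })
    }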
@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() {
 var idGen callIDGenerator
 
 // MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
+type MethodLogger interface {
+	Log(LogEntryConfig)
+}
+
+type methodLogger struct {
 	headerMaxLen, messageMaxLen uint64
 
 	callID uint64

@ -57,8 +61,8 @@ type MethodLogger struct {
 	sink Sink // TODO(blog): make this plugable.
 }
 
-func newMethodLogger(h, m uint64) *MethodLogger {
-	return &MethodLogger{
+func newMethodLogger(h, m uint64) *methodLogger {
+	return &methodLogger{
 		headerMaxLen:  h,
 		messageMaxLen: m,
 

@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger {
 	}
 }
 
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
+// Build is an internal only method for building the proto message out of the
+// input event. It's made public to enable other library to reuse as much logic
+// in methodLogger as possible.
+func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
 	m := c.toProto()
 	timestamp, _ := ptypes.TimestampProto(time.Now())
 	m.Timestamp = timestamp

@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) {
 	case *pb.GrpcLogEntry_Message:
 		m.PayloadTruncated = ml.truncateMessage(pay.Message)
 	}
+	return m
+}
 
-	ml.sink.Write(m)
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *methodLogger) Log(c LogEntryConfig) {
+	ml.sink.Write(ml.Build(c))
 }
 
-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
 	if ml.headerMaxLen == maxUInt {
 		return false
 	}

@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
 	return truncated
 }
 
-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
 	if ml.messageMaxLen == maxUInt {
 		return false
 	}
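Because MethodLogger is now an interface with a single Log method, code inside grpc-go that calls SetLogger can hand out its own implementation, for example to count entries in a test. A hedged sketch of that idea (captureLogger and methodCounter are invented names, and the package is internal, so this only illustrates the shape of the new interfaces):

    package example

    import (
        "google.golang.org/grpc/internal/binarylog"
    )

    // captureLogger hands out method loggers that count entries instead of
    // writing them to a sink; it satisfies the reworked Logger interface above.
    type captureLogger struct{ count int }

    func (c *captureLogger) GetMethodLogger(methodName string) binarylog.MethodLogger {
        return methodCounter{parent: c}
    }

    type methodCounter struct{ parent *captureLogger }

    // Log implements binarylog.MethodLogger: the entry is only counted here.
    func (m methodCounter) Log(binarylog.LogEntryConfig) { m.parent.count++ }

    func install() {
        binarylog.SetLogger(&captureLogger{})
    }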
@ -25,6 +25,7 @@ package channelz
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sort"
 	"sync"

@ -184,54 +185,77 @@ func GetServer(id int64) *ServerMetric {
 	return db.get().GetServer(id)
 }
 
-// RegisterChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
-// assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
 	id := idGen.genID()
+	var parent int64
+	isTopChannel := true
+	if pid != nil {
+		isTopChannel = false
+		parent = pid.Int()
+	}
+
+	if !IsOn() {
+		return newIdentifer(RefChannel, id, pid)
+	}
+
 	cn := &channel{
 		refName:     ref,
 		c:           c,
 		subChans:    make(map[int64]string),
 		nestedChans: make(map[int64]string),
 		id:          id,
-		pid:         pid,
+		pid:         parent,
 		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	if pid == 0 {
-		db.get().addChannel(id, cn, true, pid)
-	} else {
-		db.get().addChannel(id, cn, false, pid)
-	}
-	return id
+	db.get().addChannel(id, cn, isTopChannel, parent)
+	return newIdentifer(RefChannel, id, pid)
 }
 
-// RegisterSubChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a SubChannel's parent id cannot be 0")
-		return 0
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a SubChannel's parent id cannot be nil")
 	}
 	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefSubChannel, id, pid), nil
+	}
+
 	sc := &subChannel{
 		refName: ref,
 		c:       c,
 		sockets: make(map[int64]string),
 		id:      id,
-		pid:     pid,
+		pid:     pid.Int(),
 		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	db.get().addSubChannel(id, sc, pid)
-	return id
+	db.get().addSubChannel(id, sc, pid.Int())
+	return newIdentifer(RefSubChannel, id, pid), nil
 }
 
 // RegisterServer registers the given server s in channelz database. It returns
 // the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
 	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefServer, id, nil)
+	}
+
 	svr := &server{
 		refName: ref,
 		s:       s,

@ -240,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 {
 		id:      id,
 	}
 	db.get().addServer(id, svr)
-	return id
+	return newIdentifer(RefServer, id, nil)
 }
 
 // RegisterListenSocket registers the given listen socket s in channelz database
 // with ref as its reference name, and add it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a ListenSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a ListenSocket's parent id cannot be 0")
 	}
 	id := idGen.genID()
-	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid)
-	return id
+	if !IsOn() {
+		return newIdentifer(RefListenSocket, id, pid), nil
+	}
+
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addListenSocket(id, ls, pid.Int())
+	return newIdentifer(RefListenSocket, id, pid), nil
 }
 
 // RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
+// with ref as its reference name, and adds it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a NormalSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a NormalSocket's parent id cannot be 0")
 	}
 	id := idGen.genID()
-	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid)
-	return id
+	if !IsOn() {
+		return newIdentifer(RefNormalSocket, id, pid), nil
+	}
+
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addNormalSocket(id, ns, pid.Int())
+	return newIdentifer(RefNormalSocket, id, pid), nil
 }
 
 // RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
-func RemoveEntry(id int64) {
-	db.get().removeEntry(id)
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+	if !IsOn() {
+		return
+	}
+	db.get().removeEntry(id.Int())
 }
 
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
 type TraceEventDesc struct {
 	Desc     string
 	Severity Severity
 	Parent   *TraceEventDesc
 }
 
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
-	for d := desc; d != nil; d = d.Parent {
-		switch d.Severity {
-		case CtUnknown, CtInfo:
-			l.InfoDepth(depth+1, d.Desc)
-		case CtWarning:
-			l.WarningDepth(depth+1, d.Desc)
-		case CtError:
-			l.ErrorDepth(depth+1, d.Desc)
-		}
-	}
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+	// Log only the trace description associated with the bottom most entity.
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+	case CtWarning:
+		l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+	case CtError:
+		l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
+	}
+
 	if getMaxTraceEntry() == 0 {
 		return
 	}
-	db.get().traceEvent(id, desc)
+	if IsOn() {
+		db.get().traceEvent(id.Int(), desc)
+	}
 }
 
 // channelMap is the storage data structure for channelz.
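Inside grpc-go, these registration helpers now hand back opaque identifiers and report misuse as errors instead of logging and returning 0. A hedged sketch of the new call pattern (registerConn and the ref strings are invented for illustration; the package is internal to gRPC):

    package example

    import (
        "google.golang.org/grpc/internal/channelz"
    )

    // registerConn shows the shape of the reworked API: parents are passed and
    // returned as *channelz.Identifier values rather than raw int64 ids.
    func registerConn(cc channelz.Channel, sc channelz.Channel) error {
        top := channelz.RegisterChannel(cc, nil, "example-channel") // nil parent => top-level channel

        subID, err := channelz.RegisterSubChannel(sc, top, "example-subchannel")
        if err != nil { // e.g. a nil parent identifier
            return err
        }

        // Tear-down later uses the identifiers; both calls are no-ops when
        // channelz is turned off.
        channelz.RemoveEntry(subID)
        channelz.RemoveEntry(top)
        return nil
    }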
@ -0,0 +1,75 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package channelz

import "fmt"

// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier struct {
    typ RefChannelType
    id  int64
    str string
    pid *Identifier
}

// Type returns the entity type corresponding to id.
func (id *Identifier) Type() RefChannelType {
    return id.typ
}

// Int returns the integer identifier corresponding to id.
func (id *Identifier) Int() int64 {
    return id.id
}

// String returns a string representation of the entity corresponding to id.
//
// This includes some information about the parent as well. Examples:
// Top-level channel: [Channel #channel-number]
// Nested channel: [Channel #parent-channel-number Channel #channel-number]
// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
func (id *Identifier) String() string {
    return id.str
}

// Equal returns true if other is the same as id.
func (id *Identifier) Equal(other *Identifier) bool {
    if (id != nil) != (other != nil) {
        return false
    }
    if id == nil && other == nil {
        return true
    }
    return id.typ == other.typ && id.id == other.id && id.pid == other.pid
}

// NewIdentifierForTesting returns a new opaque identifier to be used only for
// testing purposes.
func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
    return newIdentifer(typ, id, pid)
}

func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
    str := fmt.Sprintf("%s #%d", typ, id)
    if pid != nil {
        str = fmt.Sprintf("%s %s", pid, str)
    }
    return &Identifier{typ: typ, id: id, str: str, pid: pid}
}
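The string form built by newIdentifer chains the parent's representation in front of the child's, which is what produces the bracketed prefixes on the channelz log lines elsewhere in this diff. A small sketch using the testing constructor added above, as it might appear in a test inside grpc-go (the numeric ids are arbitrary):

    package channelz_test

    import (
        "fmt"

        "google.golang.org/grpc/internal/channelz"
    )

    func ExampleIdentifier_String() {
        parent := channelz.NewIdentifierForTesting(channelz.RefChannel, 7, nil)
        sub := channelz.NewIdentifierForTesting(channelz.RefSubChannel, 8, parent)

        // withParens in the logging helpers wraps these in "[...] " per log line.
        fmt.Println(parent.String())
        fmt.Println(sub.String())
        // Output:
        // Channel #7
        // Channel #7 SubChannel #8
    }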
@ -26,77 +26,54 @@ import (
 
 var logger = grpclog.Component("channelz")
 
+func withParens(id *Identifier) string {
+	return "[" + id.String() + "] "
+}
+
 // Info logs and adds a trace event if channelz is on.
-func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
-	if IsOn() {
-		AddTraceEvent(l, id, 1, &TraceEventDesc{
-			Desc:     fmt.Sprint(args...),
-			Severity: CtInfo,
-		})
-	} else {
-		l.InfoDepth(1, args...)
-	}
+func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtInfo,
+	})
 }
 
 // Infof logs and adds a trace event if channelz is on.
-func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
-	msg := fmt.Sprintf(format, args...)
-	if IsOn() {
-		AddTraceEvent(l, id, 1, &TraceEventDesc{
-			Desc:     msg,
-			Severity: CtInfo,
-		})
-	} else {
-		l.InfoDepth(1, msg)
-	}
+func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtInfo,
+	})
 }
 
 // Warning logs and adds a trace event if channelz is on.
-func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
-	if IsOn() {
-		AddTraceEvent(l, id, 1, &TraceEventDesc{
-			Desc:     fmt.Sprint(args...),
-			Severity: CtWarning,
-		})
-	} else {
-		l.WarningDepth(1, args...)
-	}
+func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtWarning,
+	})
 }
 
 // Warningf logs and adds a trace event if channelz is on.
-func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
-	msg := fmt.Sprintf(format, args...)
-	if IsOn() {
-		AddTraceEvent(l, id, 1, &TraceEventDesc{
-			Desc:     msg,
-			Severity: CtWarning,
-		})
-	} else {
-		l.WarningDepth(1, msg)
-	}
+func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtWarning,
+	})
 }
 
 // Error logs and adds a trace event if channelz is on.
-func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
-	if IsOn() {
-		AddTraceEvent(l, id, 1, &TraceEventDesc{
-			Desc:     fmt.Sprint(args...),
-			Severity: CtError,
-		})
-	} else {
-		l.ErrorDepth(1, args...)
-	}
+func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtError,
+	})
 }
 
 // Errorf logs and adds a trace event if channelz is on.
-func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
-	msg := fmt.Sprintf(format, args...)
-	if IsOn() {
-		AddTraceEvent(l, id, 1, &TraceEventDesc{
-			Desc:     msg,
-			Severity: CtError,
-		})
-	} else {
-		l.ErrorDepth(1, msg)
-	}
+func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtError,
+	})
 }
 
@@ -686,12 +686,33 @@ const (
 type RefChannelType int

 const (
+// RefUnknown indicates an unknown entity type, the zero value for this type.
+RefUnknown RefChannelType = iota
 // RefChannel indicates the referenced entity is a Channel.
-RefChannel RefChannelType = iota
+RefChannel
 // RefSubChannel indicates the referenced entity is a SubChannel.
 RefSubChannel
+// RefServer indicates the referenced entity is a Server.
+RefServer
+// RefListenSocket indicates the referenced entity is a ListenSocket.
+RefListenSocket
+// RefNormalSocket indicates the referenced entity is a NormalSocket.
+RefNormalSocket
 )

+var refChannelTypeToString = map[RefChannelType]string{
+RefUnknown: "Unknown",
+RefChannel: "Channel",
+RefSubChannel: "SubChannel",
+RefServer: "Server",
+RefListenSocket: "ListenSocket",
+RefNormalSocket: "NormalSocket",
+}
+
+func (r RefChannelType) String() string {
+return refChannelTypeToString[r]
+}
+
 func (c *channelTrace) dumpData() *ChannelTrace {
 c.mu.Lock()
 ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
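The new RefChannelType values follow the common Go idiom of an iota-based enum with a map-backed String method. A self-contained sketch of the same idiom (lower-cased local names, not the channelz package itself):

```go
package main

import "fmt"

type refChannelType int

const (
	refUnknown refChannelType = iota // zero value means "unknown"
	refChannel
	refSubChannel
	refServer
	refListenSocket
	refNormalSocket
)

var refChannelTypeToString = map[refChannelType]string{
	refUnknown:      "Unknown",
	refChannel:      "Channel",
	refSubChannel:   "SubChannel",
	refServer:       "Server",
	refListenSocket: "ListenSocket",
	refNormalSocket: "NormalSocket",
}

// String makes the enum implement fmt.Stringer, so %v prints the name.
func (r refChannelType) String() string { return refChannelTypeToString[r] }

func main() {
	fmt.Println(refServer) // prints "Server"
}
```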
@@ -85,3 +85,9 @@ const (
 // that supports backend returned by grpclb balancer.
 CredsBundleModeBackendFromBalancer = "backend-from-balancer"
 )
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix which would be removed once
+// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental"
@@ -22,6 +22,9 @@
 package metadata

 import (
+"fmt"
+"strings"
+
 "google.golang.org/grpc/metadata"
 "google.golang.org/grpc/resolver"
 )
@@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
 addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
 return addr
 }
+
+// Validate returns an error if the input md contains invalid keys or values.
+//
+// If the header is not a pseudo-header, the following items are checked:
+// - header names must contain one or more characters from this set [0-9 a-z _ - .].
+// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
+// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
+func Validate(md metadata.MD) error {
+for k, vals := range md {
+// pseudo-header will be ignored
+if k[0] == ':' {
+continue
+}
+// check key, for i that saving a conversion if not using for range
+for i := 0; i < len(k); i++ {
+r := k[i]
+if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
+}
+}
+if strings.HasSuffix(k, "-bin") {
+continue
+}
+// check value
+for _, val := range vals {
+if hasNotPrintable(val) {
+return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
+}
+}
+}
+return nil
+}
+
+// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E
+func hasNotPrintable(msg string) bool {
+// for i that saving a conversion if not using for range
+for i := 0; i < len(msg); i++ {
+if msg[i] < 0x20 || msg[i] > 0x7E {
+return true
+}
+}
+return false
+}
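The added Validate and hasNotPrintable helpers live in gRPC's internal metadata package, so they cannot be imported from application code. The sketch below applies the same key/value rules to a public metadata.MD; the local validate function is illustrative, not the vendored one:

```go
package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/metadata"
)

// validate mirrors the rules above: lowercase keys drawn from [0-9a-z-_.],
// "-bin" values left unchecked, all other values must be printable ASCII.
func validate(md metadata.MD) error {
	for k, vals := range md {
		if strings.HasPrefix(k, ":") { // pseudo-headers are ignored
			continue
		}
		for i := 0; i < len(k); i++ {
			r := k[i]
			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
				return fmt.Errorf("header key %q contains illegal characters", k)
			}
		}
		if strings.HasSuffix(k, "-bin") {
			continue
		}
		for _, val := range vals {
			for i := 0; i < len(val); i++ {
				if val[i] < 0x20 || val[i] > 0x7E {
					return fmt.Errorf("header key %q has a non-printable ASCII value", k)
				}
			}
		}
	}
	return nil
}

func main() {
	md := metadata.Pairs("user-agent", "demo/1.0", "bad key", "x")
	fmt.Println(validate(md)) // "bad key" fails because of the space in the key
}
```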
@@ -0,0 +1,82 @@
+/*
+*
+* Copyright 2021 gRPC authors.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*/
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+"bytes"
+"encoding/json"
+"fmt"
+
+"github.com/golang/protobuf/jsonpb"
+protov1 "github.com/golang/protobuf/proto"
+"google.golang.org/protobuf/encoding/protojson"
+protov2 "google.golang.org/protobuf/proto"
+)
+
+const jsonIndent = " "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshal fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e interface{}) string {
+switch ee := e.(type) {
+case protov1.Message:
+mm := jsonpb.Marshaler{Indent: jsonIndent}
+ret, err := mm.MarshalToString(ee)
+if err != nil {
+// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+// messages are not imported, and this will fail because the message
+// is not found.
+return fmt.Sprintf("%+v", ee)
+}
+return ret
+case protov2.Message:
+mm := protojson.MarshalOptions{
+Multiline: true,
+Indent: jsonIndent,
+}
+ret, err := mm.Marshal(ee)
+if err != nil {
+// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+// messages are not imported, and this will fail because the message
+// is not found.
+return fmt.Sprintf("%+v", ee)
+}
+return string(ret)
+default:
+ret, err := json.MarshalIndent(ee, "", jsonIndent)
+if err != nil {
+return fmt.Sprintf("%+v", ee)
+}
+return string(ret)
+}
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as string.
+func FormatJSON(b []byte) string {
+var out bytes.Buffer
+err := json.Indent(&out, b, "", jsonIndent)
+if err != nil {
+return string(b)
+}
+return out.String()
+}
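The new pretty package above is internal to gRPC. The sketch below reproduces only its default (non-proto) branches with the standard library, to show the marshal-with-fallback and re-indent behaviour; the lower-cased names are local stand-ins:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

const jsonIndent = "  "

// toJSON follows the default branch of ToJSON above: indent-marshal the value
// and fall back to %+v if marshalling fails.
func toJSON(e interface{}) string {
	ret, err := json.MarshalIndent(e, "", jsonIndent)
	if err != nil {
		return fmt.Sprintf("%+v", e)
	}
	return string(ret)
}

// formatJSON re-indents already-encoded JSON, returning the input unchanged on error.
func formatJSON(b []byte) string {
	var out bytes.Buffer
	if err := json.Indent(&out, b, "", jsonIndent); err != nil {
		return string(b)
	}
	return out.String()
}

func main() {
	fmt.Println(toJSON(map[string]int{"attempts": 3}))
	fmt.Println(formatJSON([]byte(`{"addr":"10.0.0.1:443","ready":true}`)))
}
```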
@@ -137,6 +137,7 @@ type earlyAbortStream struct {
 streamID uint32
 contentSubtype string
 status *status.Status
+rst bool
 }

 func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
@@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
 if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
 return err
 }
+if eas.rst {
+if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+return err
+}
+}
 return nil
 }
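The handler above now follows the error headers with an RST_STREAM when the stream was not already half-closed. A small sketch of that frame-level step using x/net/http2 against an in-memory buffer; stream ID 1 and ErrCodeNo are arbitrary example values:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	// Write side and read side share the buffer so we can inspect the frame.
	fr := http2.NewFramer(&buf, &buf)

	// Reset stream 1 without signalling an error, as the early-abort path does.
	if err := fr.WriteRSTStream(1, http2.ErrCodeNo); err != nil {
		panic(err)
	}

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	rst := f.(*http2.RSTStreamFrame)
	fmt.Println(rst.StreamID, rst.ErrCode) // 1 NO_ERROR
}
```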
@ -132,7 +132,7 @@ type http2Client struct {
|
||||||
kpDormant bool
|
kpDormant bool
|
||||||
|
|
||||||
// Fields below are for channelz metric collection.
|
// Fields below are for channelz metric collection.
|
||||||
channelzID int64 // channelz unique identification number
|
channelzID *channelz.Identifier
|
||||||
czData *channelzData
|
czData *channelzData
|
||||||
|
|
||||||
onGoAway func(GoAwayReason)
|
onGoAway func(GoAwayReason)
|
||||||
|
@ -351,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||||
}
|
}
|
||||||
t.statsHandler.HandleConn(t.ctx, connBegin)
|
t.statsHandler.HandleConn(t.ctx, connBegin)
|
||||||
}
|
}
|
||||||
if channelz.IsOn() {
|
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||||
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
if t.keepaliveEnabled {
|
if t.keepaliveEnabled {
|
||||||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||||||
|
@ -630,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||||
// the wire. However, there are two notable exceptions:
|
// the wire. However, there are two notable exceptions:
|
||||||
//
|
//
|
||||||
// 1. If the stream headers violate the max header list size allowed by the
|
// 1. If the stream headers violate the max header list size allowed by the
|
||||||
// server. In this case there is no reason to retry at all, as it is
|
// server. It's possible this could succeed on another transport, even if
|
||||||
// assumed the RPC would continue to fail on subsequent attempts.
|
// it's unlikely, but do not transparently retry.
|
||||||
// 2. If the credentials errored when requesting their headers. In this case,
|
// 2. If the credentials errored when requesting their headers. In this case,
|
||||||
// it's possible a retry can fix the problem, but indefinitely transparently
|
// it's possible a retry can fix the problem, but indefinitely transparently
|
||||||
// retrying is not appropriate as it is likely the credentials, if they can
|
// retrying is not appropriate as it is likely the credentials, if they can
|
||||||
|
@ -639,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||||
type NewStreamError struct {
|
type NewStreamError struct {
|
||||||
Err error
|
Err error
|
||||||
|
|
||||||
DoNotRetry bool
|
AllowTransparentRetry bool
|
||||||
DoNotTransparentRetry bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e NewStreamError) Error() string {
|
func (e NewStreamError) Error() string {
|
||||||
|
@ -649,11 +649,11 @@ func (e NewStreamError) Error() string {
|
||||||
|
|
||||||
// NewStream creates a stream and registers it into the transport as "active"
|
// NewStream creates a stream and registers it into the transport as "active"
|
||||||
// streams. All non-nil errors returned will be *NewStreamError.
|
// streams. All non-nil errors returned will be *NewStreamError.
|
||||||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
|
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
|
||||||
ctx = peer.NewContext(ctx, t.getPeer())
|
ctx = peer.NewContext(ctx, t.getPeer())
|
||||||
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
|
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
|
||||||
}
|
}
|
||||||
s := t.newStream(ctx, callHdr)
|
s := t.newStream(ctx, callHdr)
|
||||||
cleanup := func(err error) {
|
cleanup := func(err error) {
|
||||||
|
@ -753,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||||
return true
|
return true
|
||||||
}, hdr)
|
}, hdr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, &NewStreamError{Err: err}
|
// Connection closed.
|
||||||
|
return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
|
||||||
}
|
}
|
||||||
if success {
|
if success {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if hdrListSizeErr != nil {
|
if hdrListSizeErr != nil {
|
||||||
return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
|
return nil, &NewStreamError{Err: hdrListSizeErr}
|
||||||
}
|
}
|
||||||
firstTry = false
|
firstTry = false
|
||||||
select {
|
select {
|
||||||
|
@ -767,9 +768,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
|
return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
|
||||||
case <-t.goAway:
|
case <-t.goAway:
|
||||||
return nil, &NewStreamError{Err: errStreamDrain}
|
return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
|
||||||
case <-t.ctx.Done():
|
case <-t.ctx.Done():
|
||||||
return nil, &NewStreamError{Err: ErrConnClosing}
|
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if t.statsHandler != nil {
|
if t.statsHandler != nil {
|
||||||
|
@ -898,9 +899,7 @@ func (t *http2Client) Close(err error) {
|
||||||
t.controlBuf.finish()
|
t.controlBuf.finish()
|
||||||
t.cancel()
|
t.cancel()
|
||||||
t.conn.Close()
|
t.conn.Close()
|
||||||
if channelz.IsOn() {
|
|
||||||
channelz.RemoveEntry(t.channelzID)
|
channelz.RemoveEntry(t.channelzID)
|
||||||
}
|
|
||||||
// Append info about previous goaways if there were any, since this may be important
|
// Append info about previous goaways if there were any, since this may be important
|
||||||
// for understanding the root cause for this connection to be closed.
|
// for understanding the root cause for this connection to be closed.
|
||||||
_, goAwayDebugMessage := t.GetGoAwayReason()
|
_, goAwayDebugMessage := t.GetGoAwayReason()
|
||||||
|
|
|
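The http2_client hunks above replace the DoNotRetry/DoNotTransparentRetry flags on NewStreamError with a single AllowTransparentRetry field that is set when the RPC never reached the wire (connection closed, draining, and so on). A local sketch of how a retry layer might consume such an error; newStreamError here is a stand-in type, not the transport's:

```go
package main

import (
	"errors"
	"fmt"
)

// newStreamError carries the underlying error plus a single retry hint.
type newStreamError struct {
	Err                   error
	AllowTransparentRetry bool
}

func (e newStreamError) Error() string { return fmt.Sprintf("stream error: %v", e.Err) }

// shouldTransparentlyRetry only retries when the transport explicitly allowed
// it, e.g. because the connection was draining before the RPC was written.
func shouldTransparentlyRetry(err error) bool {
	var nse newStreamError
	return errors.As(err, &nse) && nse.AllowTransparentRetry
}

func main() {
	drained := newStreamError{Err: errors.New("the connection is draining"), AllowTransparentRetry: true}
	tooBig := newStreamError{Err: errors.New("header list size exceeds limit")}

	fmt.Println(shouldTransparentlyRetry(drained)) // true
	fmt.Println(shouldTransparentlyRetry(tooBig))  // false
}
```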
@ -21,7 +21,6 @@ package transport
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
|
@ -36,6 +35,7 @@ import (
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
"golang.org/x/net/http2/hpack"
|
"golang.org/x/net/http2/hpack"
|
||||||
"google.golang.org/grpc/internal/grpcutil"
|
"google.golang.org/grpc/internal/grpcutil"
|
||||||
|
"google.golang.org/grpc/internal/syscall"
|
||||||
|
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
|
@ -52,10 +52,10 @@ import (
|
||||||
var (
|
var (
|
||||||
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
|
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
|
||||||
// the stream's state.
|
// the stream's state.
|
||||||
ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
|
ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
|
||||||
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
|
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
|
||||||
// than the limit set by peer.
|
// than the limit set by peer.
|
||||||
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
|
ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
|
||||||
)
|
)
|
||||||
|
|
||||||
// serverConnectionCounter counts the number of connections a server has seen
|
// serverConnectionCounter counts the number of connections a server has seen
|
||||||
|
@ -117,7 +117,7 @@ type http2Server struct {
|
||||||
idle time.Time
|
idle time.Time
|
||||||
|
|
||||||
// Fields below are for channelz metric collection.
|
// Fields below are for channelz metric collection.
|
||||||
channelzID int64 // channelz unique identification number
|
channelzID *channelz.Identifier
|
||||||
czData *channelzData
|
czData *channelzData
|
||||||
bufferPool *bufferPool
|
bufferPool *bufferPool
|
||||||
|
|
||||||
|
@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||||
if kp.Timeout == 0 {
|
if kp.Timeout == 0 {
|
||||||
kp.Timeout = defaultServerKeepaliveTimeout
|
kp.Timeout = defaultServerKeepaliveTimeout
|
||||||
}
|
}
|
||||||
|
if kp.Time != infinity {
|
||||||
|
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
|
||||||
|
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
kep := config.KeepalivePolicy
|
kep := config.KeepalivePolicy
|
||||||
if kep.MinTime == 0 {
|
if kep.MinTime == 0 {
|
||||||
kep.MinTime = defaultKeepalivePolicyMinTime
|
kep.MinTime = defaultKeepalivePolicyMinTime
|
||||||
|
@ -275,12 +280,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||||
connBegin := &stats.ConnBegin{}
|
connBegin := &stats.ConnBegin{}
|
||||||
t.stats.HandleConn(t.ctx, connBegin)
|
t.stats.HandleConn(t.ctx, connBegin)
|
||||||
}
|
}
|
||||||
if channelz.IsOn() {
|
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||||
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||||
|
|
||||||
t.framer.writer.Flush()
|
t.framer.writer.Flush()
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
|
@ -443,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||||
streamID: streamID,
|
streamID: streamID,
|
||||||
contentSubtype: s.contentSubtype,
|
contentSubtype: s.contentSubtype,
|
||||||
status: status.New(codes.Internal, errMsg),
|
status: status.New(codes.Internal, errMsg),
|
||||||
|
rst: !frame.StreamEnded(),
|
||||||
})
|
})
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -516,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||||
}
|
}
|
||||||
if httpMethod != http.MethodPost {
|
if httpMethod != http.MethodPost {
|
||||||
t.mu.Unlock()
|
t.mu.Unlock()
|
||||||
|
errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
|
||||||
if logger.V(logLevel) {
|
if logger.V(logLevel) {
|
||||||
logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
|
logger.Infof("transport: %v", errMsg)
|
||||||
}
|
}
|
||||||
t.controlBuf.put(&cleanupStream{
|
t.controlBuf.put(&earlyAbortStream{
|
||||||
|
httpStatus: 405,
|
||||||
streamID: streamID,
|
streamID: streamID,
|
||||||
rst: true,
|
contentSubtype: s.contentSubtype,
|
||||||
rstCode: http2.ErrCodeProtocol,
|
status: status.New(codes.Internal, errMsg),
|
||||||
onWrite: func() {},
|
rst: !frame.StreamEnded(),
|
||||||
})
|
})
|
||||||
s.cancel()
|
s.cancel()
|
||||||
return false
|
return false
|
||||||
|
@ -544,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||||
streamID: s.id,
|
streamID: s.id,
|
||||||
contentSubtype: s.contentSubtype,
|
contentSubtype: s.contentSubtype,
|
||||||
status: stat,
|
status: stat,
|
||||||
|
rst: !frame.StreamEnded(),
|
||||||
})
|
})
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -925,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *http2Server) streamContextErr(s *Stream) error {
|
||||||
|
select {
|
||||||
|
case <-t.done:
|
||||||
|
return ErrConnClosing
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return ContextErr(s.ctx.Err())
|
||||||
|
}
|
||||||
|
|
||||||
// WriteHeader sends the header metadata md back to the client.
|
// WriteHeader sends the header metadata md back to the client.
|
||||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||||
if s.updateHeaderSent() || s.getState() == streamDone {
|
if s.updateHeaderSent() {
|
||||||
return ErrIllegalHeaderWrite
|
return ErrIllegalHeaderWrite
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if s.getState() == streamDone {
|
||||||
|
return t.streamContextErr(s)
|
||||||
|
}
|
||||||
|
|
||||||
s.hdrMu.Lock()
|
s.hdrMu.Lock()
|
||||||
if md.Len() > 0 {
|
if md.Len() > 0 {
|
||||||
if s.header.Len() > 0 {
|
if s.header.Len() > 0 {
|
||||||
|
@ -940,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||||
}
|
}
|
||||||
if err := t.writeHeaderLocked(s); err != nil {
|
if err := t.writeHeaderLocked(s); err != nil {
|
||||||
s.hdrMu.Unlock()
|
s.hdrMu.Unlock()
|
||||||
return err
|
return status.Convert(err).Err()
|
||||||
}
|
}
|
||||||
s.hdrMu.Unlock()
|
s.hdrMu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
|
@ -1056,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||||
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
||||||
if !s.isHeaderSent() { // Headers haven't been written yet.
|
if !s.isHeaderSent() { // Headers haven't been written yet.
|
||||||
if err := t.WriteHeader(s, nil); err != nil {
|
if err := t.WriteHeader(s, nil); err != nil {
|
||||||
if _, ok := err.(ConnectionError); ok {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// TODO(mmukhi, dfawley): Make sure this is the right code to return.
|
|
||||||
return status.Errorf(codes.Internal, "transport: %v", err)
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
// Writing headers checks for this condition.
|
// Writing headers checks for this condition.
|
||||||
if s.getState() == streamDone {
|
if s.getState() == streamDone {
|
||||||
// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
|
return t.streamContextErr(s)
|
||||||
s.cancel()
|
|
||||||
select {
|
|
||||||
case <-t.done:
|
|
||||||
return ErrConnClosing
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
return ContextErr(s.ctx.Err())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
df := &dataFrame{
|
df := &dataFrame{
|
||||||
|
@ -1082,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||||
onEachWrite: t.setResetPingStrikes,
|
onEachWrite: t.setResetPingStrikes,
|
||||||
}
|
}
|
||||||
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
|
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
|
||||||
select {
|
return t.streamContextErr(s)
|
||||||
case <-t.done:
|
|
||||||
return ErrConnClosing
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
return ContextErr(s.ctx.Err())
|
|
||||||
}
|
}
|
||||||
return t.controlBuf.put(df)
|
return t.controlBuf.put(df)
|
||||||
}
|
}
|
||||||
|
@ -1210,9 +1217,7 @@ func (t *http2Server) Close() {
|
||||||
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
|
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
|
||||||
logger.Infof("transport: error closing conn during Close: %v", err)
|
logger.Infof("transport: error closing conn during Close: %v", err)
|
||||||
}
|
}
|
||||||
if channelz.IsOn() {
|
|
||||||
channelz.RemoveEntry(t.channelzID)
|
channelz.RemoveEntry(t.channelzID)
|
||||||
}
|
|
||||||
// Cancel all active streams.
|
// Cancel all active streams.
|
||||||
for _, s := range streams {
|
for _, s := range streams {
|
||||||
s.cancel()
|
s.cancel()
|
||||||
|
@ -1225,10 +1230,6 @@ func (t *http2Server) Close() {
|
||||||
|
|
||||||
// deleteStream deletes the stream s from transport's active streams.
|
// deleteStream deletes the stream s from transport's active streams.
|
||||||
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
|
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
|
||||||
// In case stream sending and receiving are invoked in separate
|
|
||||||
// goroutines (e.g., bi-directional streaming), cancel needs to be
|
|
||||||
// called to interrupt the potential blocking on other goroutines.
|
|
||||||
s.cancel()
|
|
||||||
|
|
||||||
t.mu.Lock()
|
t.mu.Lock()
|
||||||
if _, ok := t.activeStreams[s.id]; ok {
|
if _, ok := t.activeStreams[s.id]; ok {
|
||||||
|
@ -1250,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
|
||||||
|
|
||||||
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
|
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
|
||||||
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
|
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
|
||||||
|
// In case stream sending and receiving are invoked in separate
|
||||||
|
// goroutines (e.g., bi-directional streaming), cancel needs to be
|
||||||
|
// called to interrupt the potential blocking on other goroutines.
|
||||||
|
s.cancel()
|
||||||
|
|
||||||
oldState := s.swapState(streamDone)
|
oldState := s.swapState(streamDone)
|
||||||
if oldState == streamDone {
|
if oldState == streamDone {
|
||||||
// If the stream was already done, return.
|
// If the stream was already done, return.
|
||||||
|
@ -1269,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
|
||||||
|
|
||||||
// closeStream clears the footprint of a stream when the stream is not needed any more.
|
// closeStream clears the footprint of a stream when the stream is not needed any more.
|
||||||
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
|
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
|
||||||
|
// In case stream sending and receiving are invoked in separate
|
||||||
|
// goroutines (e.g., bi-directional streaming), cancel needs to be
|
||||||
|
// called to interrupt the potential blocking on other goroutines.
|
||||||
|
s.cancel()
|
||||||
|
|
||||||
s.swapState(streamDone)
|
s.swapState(streamDone)
|
||||||
t.deleteStream(s, eosReceived)
|
t.deleteStream(s, eosReceived)
|
||||||
|
|
||||||
|
|
|
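Several of the server-side hunks above funnel "stream is done" handling through a new streamContextErr helper that prefers the connection-level error over the stream's context error. A self-contained sketch of that select-with-default pattern; done and ctx are stand-ins for the transport's fields:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errConnClosing = errors.New("transport is closing")

// streamContextErr reports errConnClosing once the transport is done,
// and otherwise explains why the stream's own context ended.
func streamContextErr(done <-chan struct{}, ctx context.Context) error {
	select {
	case <-done:
		return errConnClosing
	default:
	}
	return ctx.Err()
}

func main() {
	done := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(streamContextErr(done, ctx)) // context canceled

	close(done)
	fmt.Println(streamContextErr(done, ctx)) // transport is closing
}
```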
@@ -34,6 +34,7 @@ import (

 "google.golang.org/grpc/codes"
 "google.golang.org/grpc/credentials"
+"google.golang.org/grpc/internal/channelz"
 "google.golang.org/grpc/keepalive"
 "google.golang.org/grpc/metadata"
 "google.golang.org/grpc/resolver"
@@ -529,7 +530,7 @@ type ServerConfig struct {
 InitialConnWindowSize int32
 WriteBufferSize int
 ReadBufferSize int
-ChannelzParentID int64
+ChannelzParentID *channelz.Identifier
 MaxHeaderListSize *uint32
 HeaderTableSize *uint32
 }
@@ -563,7 +564,7 @@ type ConnectOptions struct {
 // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
 ReadBufferSize int
 // ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
-ChannelzParentID int64
+ChannelzParentID *channelz.Identifier
 // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
 MaxHeaderListSize *uint32
 // UseProxy specifies if a proxy should be used.
@@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
 // map, and there's no guarantee that the MD attached to the context is
 // created using our helper functions.
 key := strings.ToLower(k)
-out[key] = v
+s := make([]string, len(v))
+copy(s, v)
+out[key] = s
 }
 return out, true
 }
@@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
 // map, and there's no guarantee that the MD attached to the context is
 // created using our helper functions.
 key := strings.ToLower(k)
-out[key] = v
+s := make([]string, len(v))
+copy(s, v)
+out[key] = s
 }
 for _, added := range raw.added {
 if len(added)%2 == 1 {
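FromIncomingContext and FromOutgoingContext now copy each value slice instead of aliasing the map stored in the context, so mutating the returned MD no longer writes through to it. A sketch of that copy idiom using the public metadata package:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

// copyMD returns an MD whose value slices have fresh backing arrays,
// matching the change above.
func copyMD(in metadata.MD) metadata.MD {
	out := make(metadata.MD, len(in))
	for k, v := range in {
		s := make([]string, len(v))
		copy(s, v) // mutations of out[k] cannot touch in[k]
		out[k] = s
	}
	return out
}

func main() {
	orig := metadata.Pairs("authorization", "bearer abc")
	cp := copyMD(orig)
	cp["authorization"][0] = "redacted"
	fmt.Println(orig["authorization"][0]) // still "bearer abc"
}
```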
@@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 }
 if _, ok := status.FromError(err); ok {
 // Status error: end the RPC unconditionally with this status.
-return nil, nil, err
+return nil, nil, dropError{error: err}
 }
 // For all other errors, wait for ready RPCs should block and other
 // RPCs should fail with unavailable.
@@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() {
 pw.done = true
 close(pw.blockingCh)
 }
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+error
+}
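The picker wrapper now wraps status errors from the LB policy in a dropError so the retry code can tell "drop this RPC" apart from other failures. A sketch of the same embed-the-error pattern, inspected with errors.As; dropError below is a local type, not gRPC's unexported one:

```go
package main

import (
	"errors"
	"fmt"
)

// dropError keeps the original message by embedding the error,
// while giving callers a distinct type to branch on.
type dropError struct {
	error
}

func pick() error {
	return dropError{error: errors.New("RPC dropped by LB policy")}
}

func main() {
	err := pick()
	var de dropError
	if errors.As(err, &de) {
		fmt.Println("drop, do not retry:", de.Error())
	}
}
```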
@ -46,77 +46,105 @@ func (*pickfirstBuilder) Name() string {
|
||||||
type pickfirstBalancer struct {
|
type pickfirstBalancer struct {
|
||||||
state connectivity.State
|
state connectivity.State
|
||||||
cc balancer.ClientConn
|
cc balancer.ClientConn
|
||||||
sc balancer.SubConn
|
subConn balancer.SubConn
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||||
switch b.state {
|
|
||||||
case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
|
|
||||||
// Set a failing picker if we don't have a good picker.
|
|
||||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
|
|
||||||
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if logger.V(2) {
|
if logger.V(2) {
|
||||||
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
||||||
}
|
}
|
||||||
|
if b.subConn == nil {
|
||||||
|
b.state = connectivity.TransientFailure
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
|
if b.state != connectivity.TransientFailure {
|
||||||
if len(cs.ResolverState.Addresses) == 0 {
|
// The picker will not change since the balancer does not currently
|
||||||
|
// report an error.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.cc.UpdateState(balancer.State{
|
||||||
|
ConnectivityState: connectivity.TransientFailure,
|
||||||
|
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
|
||||||
|
if len(state.ResolverState.Addresses) == 0 {
|
||||||
|
// The resolver reported an empty address list. Treat it like an error by
|
||||||
|
// calling b.ResolverError.
|
||||||
|
if b.subConn != nil {
|
||||||
|
// Remove the old subConn. All addresses were removed, so it is no longer
|
||||||
|
// valid.
|
||||||
|
b.cc.RemoveSubConn(b.subConn)
|
||||||
|
b.subConn = nil
|
||||||
|
}
|
||||||
b.ResolverError(errors.New("produced zero addresses"))
|
b.ResolverError(errors.New("produced zero addresses"))
|
||||||
return balancer.ErrBadResolverState
|
return balancer.ErrBadResolverState
|
||||||
}
|
}
|
||||||
if b.sc == nil {
|
|
||||||
var err error
|
if b.subConn != nil {
|
||||||
b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
|
b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if logger.V(2) {
|
if logger.V(2) {
|
||||||
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||||
}
|
}
|
||||||
b.state = connectivity.TransientFailure
|
b.state = connectivity.TransientFailure
|
||||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
|
b.cc.UpdateState(balancer.State{
|
||||||
|
ConnectivityState: connectivity.TransientFailure,
|
||||||
Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
|
Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
|
||||||
})
|
})
|
||||||
return balancer.ErrBadResolverState
|
return balancer.ErrBadResolverState
|
||||||
}
|
}
|
||||||
|
b.subConn = subConn
|
||||||
b.state = connectivity.Idle
|
b.state = connectivity.Idle
|
||||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
|
b.cc.UpdateState(balancer.State{
|
||||||
b.sc.Connect()
|
ConnectivityState: connectivity.Idle,
|
||||||
} else {
|
Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
|
||||||
b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses)
|
})
|
||||||
b.sc.Connect()
|
b.subConn.Connect()
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
|
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
|
||||||
if logger.V(2) {
|
if logger.V(2) {
|
||||||
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
|
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state)
|
||||||
}
|
}
|
||||||
if b.sc != sc {
|
if b.subConn != subConn {
|
||||||
if logger.V(2) {
|
if logger.V(2) {
|
||||||
logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
|
logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized")
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
b.state = s.ConnectivityState
|
b.state = state.ConnectivityState
|
||||||
if s.ConnectivityState == connectivity.Shutdown {
|
if state.ConnectivityState == connectivity.Shutdown {
|
||||||
b.sc = nil
|
b.subConn = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
switch s.ConnectivityState {
|
switch state.ConnectivityState {
|
||||||
case connectivity.Ready:
|
case connectivity.Ready:
|
||||||
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
|
b.cc.UpdateState(balancer.State{
|
||||||
|
ConnectivityState: state.ConnectivityState,
|
||||||
|
Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
|
||||||
|
})
|
||||||
case connectivity.Connecting:
|
case connectivity.Connecting:
|
||||||
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
|
b.cc.UpdateState(balancer.State{
|
||||||
|
ConnectivityState: state.ConnectivityState,
|
||||||
|
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
||||||
|
})
|
||||||
case connectivity.Idle:
|
case connectivity.Idle:
|
||||||
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}})
|
b.cc.UpdateState(balancer.State{
|
||||||
|
ConnectivityState: state.ConnectivityState,
|
||||||
|
Picker: &idlePicker{subConn: subConn},
|
||||||
|
})
|
||||||
case connectivity.TransientFailure:
|
case connectivity.TransientFailure:
|
||||||
b.cc.UpdateState(balancer.State{
|
b.cc.UpdateState(balancer.State{
|
||||||
ConnectivityState: s.ConnectivityState,
|
ConnectivityState: state.ConnectivityState,
|
||||||
Picker: &picker{err: s.ConnectionError},
|
Picker: &picker{err: state.ConnectionError},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *pickfirstBalancer) ExitIdle() {
|
func (b *pickfirstBalancer) ExitIdle() {
|
||||||
if b.sc != nil && b.state == connectivity.Idle {
|
if b.subConn != nil && b.state == connectivity.Idle {
|
||||||
b.sc.Connect()
|
b.subConn.Connect()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -135,18 +163,18 @@ type picker struct {
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||||
return p.result, p.err
|
return p.result, p.err
|
||||||
}
|
}
|
||||||
|
|
||||||
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
|
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
|
||||||
// CONNECTING when Pick is called.
|
// CONNECTING when Pick is called.
|
||||||
type idlePicker struct {
|
type idlePicker struct {
|
||||||
sc balancer.SubConn
|
subConn balancer.SubConn
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||||
i.sc.Connect()
|
i.subConn.Connect()
|
||||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
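The pickfirst rewrite above renames sc to subConn and keeps an idlePicker that kicks the single idle SubConn into CONNECTING whenever an RPC tries to Pick. A hedged sketch of such a picker against the public balancer API; only the Picker half is shown, wiring it into a full balancer.Balancer is omitted:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

// idleKickPicker asks its SubConn to connect and tells the ClientConn to wait.
type idleKickPicker struct {
	subConn balancer.SubConn
}

func (p *idleKickPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	p.subConn.Connect() // ask the SubConn to leave IDLE
	// ErrNoSubConnAvailable blocks wait-for-ready RPCs (and fails fail-fast
	// ones as Unavailable) until a new picker is pushed to the ClientConn.
	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}

func main() {
	var _ balancer.Picker = (*idleKickPicker)(nil) // compile-time interface check
	fmt.Println("idleKickPicker satisfies balancer.Picker")
}
```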
@@ -27,6 +27,7 @@ import (

 "google.golang.org/grpc/attributes"
 "google.golang.org/grpc/credentials"
+"google.golang.org/grpc/internal/pretty"
 "google.golang.org/grpc/serviceconfig"
 )

@@ -139,13 +140,18 @@ type Address struct {

 // Equal returns whether a and o are identical. Metadata is compared directly,
 // not with any recursive introspection.
-func (a *Address) Equal(o Address) bool {
+func (a Address) Equal(o Address) bool {
 return a.Addr == o.Addr && a.ServerName == o.ServerName &&
 a.Attributes.Equal(o.Attributes) &&
 a.BalancerAttributes.Equal(o.BalancerAttributes) &&
 a.Type == o.Type && a.Metadata == o.Metadata
 }

+// String returns JSON formatted string representation of the address.
+func (a Address) String() string {
+return pretty.ToJSON(a)
+}
+
 // BuildOptions includes additional information for the builder to create
 // the resolver.
 type BuildOptions struct {
||||||
|
|
|
@ -19,7 +19,6 @@
|
||||||
package grpc
|
package grpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
@ -27,6 +26,7 @@ import (
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/internal/channelz"
|
"google.golang.org/grpc/internal/channelz"
|
||||||
"google.golang.org/grpc/internal/grpcsync"
|
"google.golang.org/grpc/internal/grpcsync"
|
||||||
|
"google.golang.org/grpc/internal/pretty"
|
||||||
"google.golang.org/grpc/resolver"
|
"google.golang.org/grpc/resolver"
|
||||||
"google.golang.org/grpc/serviceconfig"
|
"google.golang.org/grpc/serviceconfig"
|
||||||
)
|
)
|
||||||
|
@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
|
||||||
if ccr.done.HasFired() {
|
if ccr.done.HasFired() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
|
|
||||||
if channelz.IsOn() {
|
|
||||||
ccr.addChannelzTraceEvent(s)
|
ccr.addChannelzTraceEvent(s)
|
||||||
}
|
|
||||||
ccr.curState = s
|
ccr.curState = s
|
||||||
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
|
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
|
||||||
return balancer.ErrBadResolverState
|
return balancer.ErrBadResolverState
|
||||||
|
@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||||
if ccr.done.HasFired() {
|
if ccr.done.HasFired() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
|
||||||
if channelz.IsOn() {
|
|
||||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||||
}
|
|
||||||
ccr.curState.Addresses = addrs
|
ccr.curState.Addresses = addrs
|
||||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||||
}
|
}
|
||||||
|
@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||||
if ccr.done.HasFired() {
|
if ccr.done.HasFired() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
|
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
|
||||||
if ccr.cc.dopts.disableServiceConfig {
|
if ccr.cc.dopts.disableServiceConfig {
|
||||||
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
|
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
|
||||||
return
|
return
|
||||||
|
@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||||
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if channelz.IsOn() {
|
|
||||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
||||||
}
|
|
||||||
ccr.curState.ServiceConfig = scpr
|
ccr.curState.ServiceConfig = scpr
|
||||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||||
}
|
}
|
||||||
|
@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||||
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
||||||
updates = append(updates, "resolver returned new addresses")
|
updates = append(updates, "resolver returned new addresses")
|
||||||
}
|
}
|
||||||
channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
|
channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
|
||||||
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
|
|
||||||
Severity: channelz.CtInfo,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -134,7 +134,7 @@ type Server struct {
|
||||||
channelzRemoveOnce sync.Once
|
channelzRemoveOnce sync.Once
|
||||||
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
||||||
|
|
||||||
channelzID int64 // channelz unique identification number
|
channelzID *channelz.Identifier
|
||||||
czData *channelzData
|
czData *channelzData
|
||||||
|
|
||||||
serverWorkerChannels []chan *serverWorkerData
|
serverWorkerChannels []chan *serverWorkerData
|
||||||
|
@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server {
|
||||||
s.initServerWorkers()
|
s.initServerWorkers()
|
||||||
}
|
}
|
||||||
|
|
||||||
if channelz.IsOn() {
|
|
||||||
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
|
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
|
||||||
}
|
channelz.Info(logger, s.channelzID, "Server created")
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -712,7 +711,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped")
|
||||||
|
|
||||||
type listenSocket struct {
|
type listenSocket struct {
|
||||||
net.Listener
|
net.Listener
|
||||||
channelzID int64
|
channelzID *channelz.Identifier
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||||
|
@ -724,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||||
|
|
||||||
func (l *listenSocket) Close() error {
|
func (l *listenSocket) Close() error {
|
||||||
err := l.Listener.Close()
|
err := l.Listener.Close()
|
||||||
if channelz.IsOn() {
|
|
||||||
channelz.RemoveEntry(l.channelzID)
|
channelz.RemoveEntry(l.channelzID)
|
||||||
}
|
channelz.Info(logger, l.channelzID, "ListenSocket deleted")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -759,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error {
|
||||||
ls := &listenSocket{Listener: lis}
|
ls := &listenSocket{Listener: lis}
|
||||||
s.lis[ls] = true
|
s.lis[ls] = true
|
||||||
|
|
||||||
if channelz.IsOn() {
|
|
||||||
ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
|
|
||||||
}
|
|
||||||
s.mu.Unlock()
|
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
if s.lis != nil && s.lis[ls] {
|
if s.lis != nil && s.lis[ls] {
|
||||||
|
@ -773,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error {
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var tempDelay time.Duration // how long to sleep on accept failure
|
var err error
|
||||||
|
ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
|
||||||
|
if err != nil {
|
||||||
|
s.mu.Unlock()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.mu.Unlock()
|
||||||
|
channelz.Info(logger, ls.channelzID, "ListenSocket created")
|
||||||
|
|
||||||
|
var tempDelay time.Duration // how long to sleep on accept failure
|
||||||
for {
|
for {
|
||||||
rawConn, err := lis.Accept()
|
rawConn, err := lis.Accept()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1709,11 +1710,7 @@ func (s *Server) Stop() {
|
||||||
s.done.Fire()
|
s.done.Fire()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
s.channelzRemoveOnce.Do(func() {
|
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
|
||||||
if channelz.IsOn() {
|
|
||||||
channelz.RemoveEntry(s.channelzID)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
listeners := s.lis
|
listeners := s.lis
|
||||||
|
@ -1751,11 +1748,7 @@ func (s *Server) GracefulStop() {
|
||||||
s.quit.Fire()
|
s.quit.Fire()
|
||||||
defer s.done.Fire()
|
defer s.done.Fire()
|
||||||
|
|
||||||
s.channelzRemoveOnce.Do(func() {
|
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
|
||||||
if channelz.IsOn() {
|
|
||||||
channelz.RemoveEntry(s.channelzID)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
if s.conns == nil {
|
if s.conns == nil {
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
|
@ -1808,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
|
||||||
return codec
|
return codec
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetHeader sets the header metadata.
|
// SetHeader sets the header metadata to be sent from the server to the client.
|
||||||
// When called multiple times, all the provided metadata will be merged.
|
// The context provided must be the context passed to the server's handler.
|
||||||
// All the metadata will be sent out when one of the following happens:
|
//
|
||||||
// - grpc.SendHeader() is called;
|
// Streaming RPCs should prefer the SetHeader method of the ServerStream.
|
||||||
// - The first response is sent out;
|
//
|
||||||
// - An RPC status is sent out (error or success).
|
// When called multiple times, all the provided metadata will be merged. All
|
||||||
|
// the metadata will be sent out when one of the following happens:
|
||||||
|
//
|
||||||
|
// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
|
||||||
|
// - The first response message is sent. For unary handlers, this occurs when
|
||||||
|
// the handler returns; for streaming handlers, this can happen when stream's
|
||||||
|
// SendMsg method is called.
|
||||||
|
// - An RPC status is sent out (error or success). This occurs when the handler
|
||||||
|
// returns.
|
||||||
|
//
|
||||||
|
// SetHeader will fail if called after any of the events above.
|
||||||
|
//
|
||||||
|
// The error returned is compatible with the status package. However, the
|
||||||
|
// status code will often not match the RPC status as seen by the client
|
||||||
|
// application, and therefore, should not be relied upon for this purpose.
|
||||||
func SetHeader(ctx context.Context, md metadata.MD) error {
|
func SetHeader(ctx context.Context, md metadata.MD) error {
|
||||||
if md.Len() == 0 {
|
if md.Len() == 0 {
|
||||||
return nil
|
return nil
|
||||||
|
@ -1825,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
|
||||||
return stream.SetHeader(md)
|
return stream.SetHeader(md)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendHeader sends header metadata. It may be called at most once.
|
// SendHeader sends header metadata. It may be called at most once, and may not
|
||||||
// The provided md and headers set by SetHeader() will be sent.
|
// be called after any event that causes headers to be sent (see SetHeader for
|
||||||
|
// a complete list). The provided md and headers set by SetHeader() will be
|
||||||
|
// sent.
|
||||||
|
//
|
||||||
|
// The error returned is compatible with the status package. However, the
|
||||||
|
// status code will often not match the RPC status as seen by the client
|
||||||
|
// application, and therefore, should not be relied upon for this purpose.
|
||||||
func SendHeader(ctx context.Context, md metadata.MD) error {
|
func SendHeader(ctx context.Context, md metadata.MD) error {
|
||||||
stream := ServerTransportStreamFromContext(ctx)
|
stream := ServerTransportStreamFromContext(ctx)
|
||||||
if stream == nil {
|
if stream == nil {
|
||||||
|
@ -1840,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
|
||||||
|
|
||||||
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
|
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
|
||||||
// When called more than once, all the provided metadata will be merged.
|
// When called more than once, all the provided metadata will be merged.
|
||||||
|
//
|
||||||
|
// The error returned is compatible with the status package. However, the
|
||||||
|
// status code will often not match the RPC status as seen by the client
|
||||||
|
// application, and therefore, should not be relied upon for this purpose.
|
||||||
func SetTrailer(ctx context.Context, md metadata.MD) error {
|
func SetTrailer(ctx context.Context, md metadata.MD) error {
|
||||||
if md.Len() == 0 {
|
if md.Len() == 0 {
|
||||||
return nil
|
return nil
|
||||||
|
|
|
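The expanded SetHeader/SendHeader documentation above spells out when header metadata actually reaches the wire and that SetHeader must not be called afterwards. A short handler-side sketch of those rules; the handler shape, keys, and values are placeholders, not part of the diff:

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// handle shows the documented ordering: SetHeader may be called repeatedly
// and merges metadata; SendHeader flushes the merged headers at most once.
func handle(ctx context.Context, in string) (string, error) {
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "123")); err != nil {
		return "", err
	}
	// Forces the merged headers onto the wire now; a later SetHeader would fail.
	if err := grpc.SendHeader(ctx, metadata.Pairs("x-region", "eu-west-1")); err != nil {
		return "", err
	}
	return "ok: " + in, nil
}

func main() { _ = handle } // normally registered as part of a grpc.Server service
```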
@@ -381,6 +381,9 @@ func init() {
 //
 // If any of them is NOT *ServiceConfig, return false.
 func equalServiceConfig(a, b serviceconfig.Config) bool {
+if a == nil && b == nil {
+return true
+}
 aa, ok := a.(*ServiceConfig)
 if !ok {
 return false
||||||
|
|
|
@ -36,6 +36,7 @@ import (
|
||||||
"google.golang.org/grpc/internal/channelz"
|
"google.golang.org/grpc/internal/channelz"
|
||||||
"google.golang.org/grpc/internal/grpcrand"
|
"google.golang.org/grpc/internal/grpcrand"
|
||||||
"google.golang.org/grpc/internal/grpcutil"
|
"google.golang.org/grpc/internal/grpcutil"
|
||||||
|
imetadata "google.golang.org/grpc/internal/metadata"
|
||||||
iresolver "google.golang.org/grpc/internal/resolver"
|
iresolver "google.golang.org/grpc/internal/resolver"
|
||||||
"google.golang.org/grpc/internal/serviceconfig"
|
"google.golang.org/grpc/internal/serviceconfig"
|
||||||
"google.golang.org/grpc/internal/transport"
|
"google.golang.org/grpc/internal/transport"
|
||||||
|
@ -166,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||||
}
|
}
|
||||||
|
|
||||||
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||||
|
if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
|
||||||
|
if err := imetadata.Validate(md); err != nil {
|
||||||
|
return nil, status.Error(codes.Internal, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
if channelz.IsOn() {
|
if channelz.IsOn() {
|
||||||
cc.incrCallsStarted()
|
cc.incrCallsStarted()
|
||||||
defer func() {
|
defer func() {
|
||||||
|
@ -297,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 	}
 	cs.binlog = binarylog.GetMethodLogger(method)
 
-	if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
+	cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */)
+	if err != nil {
 		cs.finish(err)
 		return nil, err
 	}
 
-	op := func(a *csAttempt) error { return a.newStream() }
+	// Pick the transport to use and create a new stream on the transport.
+	// Assign cs.attempt upon success.
+	op := func(a *csAttempt) error {
+		if err := a.getTransport(); err != nil {
+			return err
+		}
+		if err := a.newStream(); err != nil {
+			return err
+		}
+		// Because this operation is always called either here (while creating
+		// the clientStream) or by the retry code while locked when replaying
+		// the operation, it is safe to access cs.attempt directly.
+		cs.attempt = a
+		return nil
+	}
 	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
-		cs.finish(err)
 		return nil, err
 	}
 

@ -343,9 +363,15 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 	return cs, nil
 }
 
-// newAttemptLocked creates a new attempt with a transport.
-// If it succeeds, then it replaces clientStream's attempt with this new attempt.
-func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
+	if err := cs.ctx.Err(); err != nil {
+		return nil, toRPCErr(err)
+	}
+	if err := cs.cc.ctx.Err(); err != nil {
+		return nil, ErrClientConnClosing
+	}
+
 	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
 	method := cs.callHdr.Method
 	sh := cs.cc.dopts.copts.StatsHandler

@ -379,27 +405,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
 		ctx = trace.NewContext(ctx, trInfo.tr)
 	}
 
-	newAttempt := &csAttempt{
-		ctx:          ctx,
-		beginTime:    beginTime,
-		cs:           cs,
-		dc:           cs.cc.dopts.dc,
-		statsHandler: sh,
-		trInfo:       trInfo,
-	}
-	defer func() {
-		if retErr != nil {
-			// This attempt is not set in the clientStream, so it's finish won't
-			// be called. Call it here for stats and trace in case they are not
-			// nil.
-			newAttempt.finish(retErr)
-		}
-	}()
-
-	if err := ctx.Err(); err != nil {
-		return toRPCErr(err)
-	}
-
 	if cs.cc.parsedTarget.Scheme == "xds" {
 		// Add extra metadata (metadata that will be added by transport) to context
 		// so the balancer can see them.

@ -407,16 +412,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
 			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
 		))
 	}
-	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	return &csAttempt{
+		ctx:          ctx,
+		beginTime:    beginTime,
+		cs:           cs,
+		dc:           cs.cc.dopts.dc,
+		statsHandler: sh,
+		trInfo:       trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	var err error
+	a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
 	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
 		return err
 	}
-	if trInfo != nil {
-		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
 	}
-	newAttempt.t = t
-	newAttempt.done = done
-	cs.attempt = newAttempt
 	return nil
 }
 

@ -425,12 +446,21 @@ func (a *csAttempt) newStream() error {
 	cs.callHdr.PreviousAttempts = cs.numRetries
 	s, err := a.t.NewStream(a.ctx, cs.callHdr)
 	if err != nil {
-		// Return without converting to an RPC error so retry code can
-		// inspect.
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
 			return err
 		}
-	cs.attempt.s = s
-	cs.attempt.p = &parser{r: s}
+
+		if nse.AllowTransparentRetry {
+			a.allowTransparentRetry = true
+		}
+
+		// Unwrap and convert error.
+		return toRPCErr(nse.Err)
+	}
+	a.s = s
+	a.p = &parser{r: s}
 	return nil
 }
 

@ -456,7 +486,7 @@ type clientStream struct {
 
 	retryThrottler *retryThrottler // The throttler active when the RPC began.
 
-	binlog *binarylog.MethodLogger // Binary logger, can be nil.
+	binlog binarylog.MethodLogger // Binary logger, can be nil.
 	// serverHeaderBinlogged is a boolean for whether server header has been
 	// logged. Server header will be logged when the first time one of those
 	// happens: stream.Header(), stream.Recv().

@ -508,6 +538,11 @@ type csAttempt struct {
 
 	statsHandler stats.Handler
 	beginTime    time.Time
+
+	// set for newStream errors that may be transparently retried
+	allowTransparentRetry bool
+	// set for pick errors that are returned as a status
+	drop bool
 }
 
 func (cs *clientStream) commitAttemptLocked() {

@ -527,41 +562,21 @@ func (cs *clientStream) commitAttempt() {
 // shouldRetry returns nil if the RPC should be retried; otherwise it returns
 // the error that should be returned by the operation. If the RPC should be
 // retried, the bool indicates whether it is being retried transparently.
-func (cs *clientStream) shouldRetry(err error) (bool, error) {
-	if cs.attempt.s == nil {
-		// Error from NewClientStream.
-		nse, ok := err.(*transport.NewStreamError)
-		if !ok {
-			// Unexpected, but assume no I/O was performed and the RPC is not
-			// fatal, so retry indefinitely.
-			return true, nil
-		}
-
-		// Unwrap and convert error.
-		err = toRPCErr(nse.Err)
-
-		// Never retry DoNotRetry errors, which indicate the RPC should not be
-		// retried due to max header list size violation, etc.
-		if nse.DoNotRetry {
-			return false, err
-		}
-
-		// In the event of a non-IO operation error from NewStream, we never
-		// attempted to write anything to the wire, so we can retry
-		// indefinitely.
-		if !nse.DoNotTransparentRetry {
-			return true, nil
-		}
-	}
-	if cs.finished || cs.committed {
-		// RPC is finished or committed; cannot retry.
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+	cs := a.cs
+
+	if cs.finished || cs.committed || a.drop {
+		// RPC is finished or committed or was dropped by the picker; cannot retry.
 		return false, err
 	}
+	if a.s == nil && a.allowTransparentRetry {
+		return true, nil
+	}
 	// Wait for the trailers.
 	unprocessed := false
-	if cs.attempt.s != nil {
-		<-cs.attempt.s.Done()
-		unprocessed = cs.attempt.s.Unprocessed()
+	if a.s != nil {
+		<-a.s.Done()
+		unprocessed = a.s.Unprocessed()
 	}
 	if cs.firstAttempt && unprocessed {
 		// First attempt, stream unprocessed: transparently retry.

@ -573,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
 
 	pushback := 0
 	hasPushback := false
-	if cs.attempt.s != nil {
-		if !cs.attempt.s.TrailersOnly() {
+	if a.s != nil {
+		if !a.s.TrailersOnly() {
 			return false, err
 		}
 
 		// TODO(retry): Move down if the spec changes to not check server pushback
 		// before considering this a failure for throttling.
-		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
+		sps := a.s.Trailer()["grpc-retry-pushback-ms"]
 		if len(sps) == 1 {
 			var e error
 			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {

@ -597,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
 	}
 
 	var code codes.Code
-	if cs.attempt.s != nil {
-		code = cs.attempt.s.Status().Code()
+	if a.s != nil {
+		code = a.s.Status().Code()
 	} else {
-		code = status.Convert(err).Code()
+		code = status.Code(err)
 	}
 
 	rp := cs.methodConfig.RetryPolicy

@ -645,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
 }
 
 // Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(lastErr error) error {
+func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
 	for {
-		cs.attempt.finish(toRPCErr(lastErr))
-		isTransparent, err := cs.shouldRetry(lastErr)
+		attempt.finish(toRPCErr(lastErr))
+		isTransparent, err := attempt.shouldRetry(lastErr)
 		if err != nil {
 			cs.commitAttemptLocked()
 			return err
 		}
 		cs.firstAttempt = false
-		if err := cs.newAttemptLocked(isTransparent); err != nil {
+		attempt, err = cs.newAttemptLocked(isTransparent)
+		if err != nil {
+			// Only returns error if the clientconn is closed or the context of
+			// the stream is canceled.
 			return err
 		}
-		if lastErr = cs.replayBufferLocked(); lastErr == nil {
+		// Note that the first op in the replay buffer always sets cs.attempt
+		// if it is able to pick a transport and create a stream.
+		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
 			return nil
 		}
 	}
 }

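The reworked retryLocked and shouldRetry paths above are driven by the method's RetryPolicy from the service config plus the server's grpc-retry-pushback-ms trailer. As a hedged orientation sketch only (the target and service names are placeholders, not part of this change), a client typically opts in to such retries like this:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialWithRetries supplies a per-method retryPolicy through the default
// service config, which is what the retry machinery patched above consumes.
func dialWithRetries() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"dns:///example.invalid:443", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{
		  "methodConfig": [{
		    "name": [{"service": "example.Echo"}],
		    "retryPolicy": {
		      "maxAttempts": 4,
		      "initialBackoff": "0.1s",
		      "maxBackoff": "1s",
		      "backoffMultiplier": 2,
		      "retryableStatusCodes": ["UNAVAILABLE"]
		    }
		  }]
		}`),
	)
}
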
@ -667,8 +687,11 @@ func (cs *clientStream) Context() context.Context {
 	cs.commitAttempt()
 	// No need to lock before using attempt, since we know it is committed and
 	// cannot change.
-	return cs.attempt.s.Context()
+	if cs.attempt.s != nil {
+		return cs.attempt.s.Context()
+	}
+	return cs.ctx
 }
 
 func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
 	cs.mu.Lock()

@ -697,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
 			cs.mu.Unlock()
 			return err
 		}
-		if err := cs.retryLocked(err); err != nil {
+		if err := cs.retryLocked(a, err); err != nil {
 			cs.mu.Unlock()
 			return err
 		}

@ -728,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
 		cs.binlog.Log(logEntry)
 		cs.serverHeaderBinlogged = true
 	}
-	return m, err
+	return m, nil
 }
 
 func (cs *clientStream) Trailer() metadata.MD {

@ -746,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD {
 	return cs.attempt.s.Trailer()
 }
 
-func (cs *clientStream) replayBufferLocked() error {
-	a := cs.attempt
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
 	for _, f := range cs.buffer {
-		if err := f(a); err != nil {
+		if err := f(attempt); err != nil {
 			return err
 		}
 	}

@ -797,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
 	if len(payload) > *cs.callInfo.maxSendMessageSize {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
 	}
-	msgBytes := data // Store the pointer before setting to nil. For binary logging.
 	op := func(a *csAttempt) error {
-		err := a.sendMsg(m, hdr, payload, data)
-		// nil out the message and uncomp when replaying; they are only needed for
-		// stats which is disabled for subsequent attempts.
-		m, data = nil, nil
-		return err
+		return a.sendMsg(m, hdr, payload, data)
 	}
 	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
 	if cs.binlog != nil && err == nil {
 		cs.binlog.Log(&binarylog.ClientMessage{
 			OnClientSide: true,
-			Message:      msgBytes,
+			Message:      data,
 		})
 	}
-	return
+	return err
 }
 
 func (cs *clientStream) RecvMsg(m interface{}) error {

@ -1364,8 +1381,10 @@ func (as *addrConnStream) finish(err error) {
 
 // ServerStream defines the server-side behavior of a streaming RPC.
 //
-// All errors returned from ServerStream methods are compatible with the
-// status package.
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
 type ServerStream interface {
 	// SetHeader sets the header metadata. It may be called multiple times.
 	// When call multiple times, all the provided metadata will be merged.

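The reworded ServerStream comment, like the SetTrailer comment earlier in this diff, points callers at the status package, and the retry code above now uses status.Code(err) in place of status.Convert(err).Code(). A small hedged client-side sketch of that idiom follows; the recv callback is only a stand-in for a generated stream's Recv method.

package example

import (
	"io"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// drain reads a stream until EOF and classifies the terminal error via the
// status package; recv is a stand-in for a generated Recv method.
func drain(recv func() (interface{}, error)) error {
	for {
		if _, err := recv(); err != nil {
			if err == io.EOF {
				return nil
			}
			// status.Code never panics and maps non-status errors to codes.Unknown.
			if status.Code(err) == codes.Unavailable {
				return err // a caller might choose to retry here; returned as-is in this sketch
			}
			return err
		}
	}
}
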
@ -1448,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error {
|
||||||
if md.Len() == 0 {
|
if md.Len() == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
err := imetadata.Validate(md)
|
||||||
|
if err != nil {
|
||||||
|
return status.Error(codes.Internal, err.Error())
|
||||||
|
}
|
||||||
return ss.s.SetHeader(md)
|
return ss.s.SetHeader(md)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ss *serverStream) SendHeader(md metadata.MD) error {
|
func (ss *serverStream) SendHeader(md metadata.MD) error {
|
||||||
err := ss.t.WriteHeader(ss.s, md)
|
err := imetadata.Validate(md)
|
||||||
|
if err != nil {
|
||||||
|
return status.Error(codes.Internal, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ss.t.WriteHeader(ss.s, md)
|
||||||
if ss.binlog != nil && !ss.serverHeaderBinlogged {
|
if ss.binlog != nil && !ss.serverHeaderBinlogged {
|
||||||
h, _ := ss.s.Header()
|
h, _ := ss.s.Header()
|
||||||
ss.binlog.Log(&binarylog.ServerHeader{
|
ss.binlog.Log(&binarylog.ServerHeader{
|
||||||
|
@ -1467,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
 	if md.Len() == 0 {
 		return
 	}
+	if err := imetadata.Validate(md); err != nil {
+		logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
+	}
 	ss.s.SetTrailer(md)
 }
 

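With the imetadata.Validate calls added above, SetHeader and SendHeader now reject metadata that cannot go on the wire with a codes.Internal status, and SetTrailer logs the problem instead of letting it surface later in the transport. A hypothetical streaming-handler fragment is sketched below against the plain grpc.ServerStream interface; a generated stream type would embed it.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// sendMetadata is an illustrative fragment of a streaming handler body.
func sendMetadata(stream grpc.ServerStream) error {
	// Valid lowercase keys pass the new validation; an invalid key (for
	// example one containing a space) would now surface as codes.Internal.
	if err := stream.SetHeader(metadata.Pairs("x-request-id", "abc-123")); err != nil {
		return err
	}
	stream.SetTrailer(metadata.Pairs("x-processed", "true"))
	return nil
}
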
@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.45.0"
+const Version = "1.47.0"

@ -0,0 +1,665 @@
|
||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package protojson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/internal/encoding/json"
|
||||||
|
"google.golang.org/protobuf/internal/encoding/messageset"
|
||||||
|
"google.golang.org/protobuf/internal/errors"
|
||||||
|
"google.golang.org/protobuf/internal/flags"
|
||||||
|
"google.golang.org/protobuf/internal/genid"
|
||||||
|
"google.golang.org/protobuf/internal/pragma"
|
||||||
|
"google.golang.org/protobuf/internal/set"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
pref "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Unmarshal reads the given []byte into the given proto.Message.
|
||||||
|
// The provided message must be mutable (e.g., a non-nil pointer to a message).
|
||||||
|
func Unmarshal(b []byte, m proto.Message) error {
|
||||||
|
return UnmarshalOptions{}.Unmarshal(b, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalOptions is a configurable JSON format parser.
|
||||||
|
type UnmarshalOptions struct {
|
||||||
|
pragma.NoUnkeyedLiterals
|
||||||
|
|
||||||
|
// If AllowPartial is set, input for messages that will result in missing
|
||||||
|
// required fields will not return an error.
|
||||||
|
AllowPartial bool
|
||||||
|
|
||||||
|
// If DiscardUnknown is set, unknown fields are ignored.
|
||||||
|
DiscardUnknown bool
|
||||||
|
|
||||||
|
// Resolver is used for looking up types when unmarshaling
|
||||||
|
// google.protobuf.Any messages or extension fields.
|
||||||
|
// If nil, this defaults to using protoregistry.GlobalTypes.
|
||||||
|
Resolver interface {
|
||||||
|
protoregistry.MessageTypeResolver
|
||||||
|
protoregistry.ExtensionTypeResolver
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal reads the given []byte and populates the given proto.Message
|
||||||
|
// using options in the UnmarshalOptions object.
|
||||||
|
// It will clear the message first before setting the fields.
|
||||||
|
// If it returns an error, the given message may be partially set.
|
||||||
|
// The provided message must be mutable (e.g., a non-nil pointer to a message).
|
||||||
|
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
|
||||||
|
return o.unmarshal(b, m)
|
||||||
|
}
|
||||||
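For readers new to this freshly vendored package, a short usage sketch of the Unmarshal entry points defined above follows. structpb.Struct is used only because it needs no generated code, and DiscardUnknown is just one example option; any proto.Message works the same way.

package example

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

// parse decodes JSON into a well-known Struct message using UnmarshalOptions.
func parse(data []byte) (*structpb.Struct, error) {
	msg := &structpb.Struct{}
	opts := protojson.UnmarshalOptions{DiscardUnknown: true}
	if err := opts.Unmarshal(data, msg); err != nil {
		return nil, err
	}
	return msg, nil
}
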
|
|
||||||
|
// unmarshal is a centralized function that all unmarshal operations go through.
|
||||||
|
// For profiling purposes, avoid changing the name of this function or
|
||||||
|
// introducing other code paths for unmarshal that do not go through this.
|
||||||
|
func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
|
||||||
|
proto.Reset(m)
|
||||||
|
|
||||||
|
if o.Resolver == nil {
|
||||||
|
o.Resolver = protoregistry.GlobalTypes
|
||||||
|
}
|
||||||
|
|
||||||
|
dec := decoder{json.NewDecoder(b), o}
|
||||||
|
if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for EOF.
|
||||||
|
tok, err := dec.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.EOF {
|
||||||
|
return dec.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
|
||||||
|
if o.AllowPartial {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return proto.CheckInitialized(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
type decoder struct {
|
||||||
|
*json.Decoder
|
||||||
|
opts UnmarshalOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// newError returns an error object with position info.
|
||||||
|
func (d decoder) newError(pos int, f string, x ...interface{}) error {
|
||||||
|
line, column := d.Position(pos)
|
||||||
|
head := fmt.Sprintf("(line %d:%d): ", line, column)
|
||||||
|
return errors.New(head+f, x...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unexpectedTokenError returns a syntax error for the given unexpected token.
|
||||||
|
func (d decoder) unexpectedTokenError(tok json.Token) error {
|
||||||
|
return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
// syntaxError returns a syntax error for given position.
|
||||||
|
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
|
||||||
|
line, column := d.Position(pos)
|
||||||
|
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
|
||||||
|
return errors.New(head+f, x...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalMessage unmarshals a message into the given protoreflect.Message.
|
||||||
|
func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
|
||||||
|
if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
|
||||||
|
return unmarshal(d, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.ObjectOpen {
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
|
||||||
|
messageDesc := m.Descriptor()
|
||||||
|
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
|
||||||
|
return errors.New("no support for proto1 MessageSets")
|
||||||
|
}
|
||||||
|
|
||||||
|
var seenNums set.Ints
|
||||||
|
var seenOneofs set.Ints
|
||||||
|
fieldDescs := messageDesc.Fields()
|
||||||
|
for {
|
||||||
|
// Read field name.
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok.Kind() {
|
||||||
|
default:
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
case json.ObjectClose:
|
||||||
|
return nil
|
||||||
|
case json.Name:
|
||||||
|
// Continue below.
|
||||||
|
}
|
||||||
|
|
||||||
|
name := tok.Name()
|
||||||
|
// Unmarshaling a non-custom embedded message in Any will contain the
|
||||||
|
// JSON field "@type" which should be skipped because it is not a field
|
||||||
|
// of the embedded message, but simply an artifact of the Any format.
|
||||||
|
if skipTypeURL && name == "@type" {
|
||||||
|
d.Read()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the FieldDescriptor.
|
||||||
|
var fd pref.FieldDescriptor
|
||||||
|
if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
|
||||||
|
// Only extension names are in [name] format.
|
||||||
|
extName := pref.FullName(name[1 : len(name)-1])
|
||||||
|
extType, err := d.opts.Resolver.FindExtensionByName(extName)
|
||||||
|
if err != nil && err != protoregistry.NotFound {
|
||||||
|
return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
|
||||||
|
}
|
||||||
|
if extType != nil {
|
||||||
|
fd = extType.TypeDescriptor()
|
||||||
|
if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
|
||||||
|
return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// The name can either be the JSON name or the proto field name.
|
||||||
|
fd = fieldDescs.ByJSONName(name)
|
||||||
|
if fd == nil {
|
||||||
|
fd = fieldDescs.ByTextName(name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if flags.ProtoLegacy {
|
||||||
|
if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
|
||||||
|
fd = nil // reset since the weak reference is not linked in
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fd == nil {
|
||||||
|
// Field is unknown.
|
||||||
|
if d.opts.DiscardUnknown {
|
||||||
|
if err := d.skipJSONValue(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do not allow duplicate fields.
|
||||||
|
num := uint64(fd.Number())
|
||||||
|
if seenNums.Has(num) {
|
||||||
|
return d.newError(tok.Pos(), "duplicate field %v", tok.RawString())
|
||||||
|
}
|
||||||
|
seenNums.Set(num)
|
||||||
|
|
||||||
|
// No need to set values for JSON null unless the field type is
|
||||||
|
// google.protobuf.Value or google.protobuf.NullValue.
|
||||||
|
if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) {
|
||||||
|
d.Read()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case fd.IsList():
|
||||||
|
list := m.Mutable(fd).List()
|
||||||
|
if err := d.unmarshalList(list, fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case fd.IsMap():
|
||||||
|
mmap := m.Mutable(fd).Map()
|
||||||
|
if err := d.unmarshalMap(mmap, fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// If field is a oneof, check if it has already been set.
|
||||||
|
if od := fd.ContainingOneof(); od != nil {
|
||||||
|
idx := uint64(od.Index())
|
||||||
|
if seenOneofs.Has(idx) {
|
||||||
|
return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName())
|
||||||
|
}
|
||||||
|
seenOneofs.Set(idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Required or optional fields.
|
||||||
|
if err := d.unmarshalSingular(m, fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isKnownValue(fd pref.FieldDescriptor) bool {
|
||||||
|
md := fd.Message()
|
||||||
|
return md != nil && md.FullName() == genid.Value_message_fullname
|
||||||
|
}
|
||||||
|
|
||||||
|
func isNullValue(fd pref.FieldDescriptor) bool {
|
||||||
|
ed := fd.Enum()
|
||||||
|
return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalSingular unmarshals to the non-repeated field specified
|
||||||
|
// by the given FieldDescriptor.
|
||||||
|
func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error {
|
||||||
|
var val pref.Value
|
||||||
|
var err error
|
||||||
|
switch fd.Kind() {
|
||||||
|
case pref.MessageKind, pref.GroupKind:
|
||||||
|
val = m.NewField(fd)
|
||||||
|
err = d.unmarshalMessage(val.Message(), false)
|
||||||
|
default:
|
||||||
|
val, err = d.unmarshalScalar(fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, val)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
|
||||||
|
// the given FieldDescriptor.
|
||||||
|
func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
|
||||||
|
const b32 int = 32
|
||||||
|
const b64 int = 64
|
||||||
|
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return pref.Value{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
kind := fd.Kind()
|
||||||
|
switch kind {
|
||||||
|
case pref.BoolKind:
|
||||||
|
if tok.Kind() == json.Bool {
|
||||||
|
return pref.ValueOfBool(tok.Bool()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
|
||||||
|
if v, ok := unmarshalInt(tok, b32); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
|
||||||
|
if v, ok := unmarshalInt(tok, b64); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Uint32Kind, pref.Fixed32Kind:
|
||||||
|
if v, ok := unmarshalUint(tok, b32); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Uint64Kind, pref.Fixed64Kind:
|
||||||
|
if v, ok := unmarshalUint(tok, b64); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.FloatKind:
|
||||||
|
if v, ok := unmarshalFloat(tok, b32); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.DoubleKind:
|
||||||
|
if v, ok := unmarshalFloat(tok, b64); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.StringKind:
|
||||||
|
if tok.Kind() == json.String {
|
||||||
|
return pref.ValueOfString(tok.ParsedString()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.BytesKind:
|
||||||
|
if v, ok := unmarshalBytes(tok); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.EnumKind:
|
||||||
|
if v, ok := unmarshalEnum(tok, fd); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
|
||||||
|
}
|
||||||
|
|
||||||
|
return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) {
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.Number:
|
||||||
|
return getInt(tok, bitSize)
|
||||||
|
|
||||||
|
case json.String:
|
||||||
|
// Decode number from string.
|
||||||
|
s := strings.TrimSpace(tok.ParsedString())
|
||||||
|
if len(s) != len(tok.ParsedString()) {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
dec := json.NewDecoder([]byte(s))
|
||||||
|
tok, err := dec.Read()
|
||||||
|
if err != nil {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
return getInt(tok, bitSize)
|
||||||
|
}
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getInt(tok json.Token, bitSize int) (pref.Value, bool) {
|
||||||
|
n, ok := tok.Int(bitSize)
|
||||||
|
if !ok {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
if bitSize == 32 {
|
||||||
|
return pref.ValueOfInt32(int32(n)), true
|
||||||
|
}
|
||||||
|
return pref.ValueOfInt64(n), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) {
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.Number:
|
||||||
|
return getUint(tok, bitSize)
|
||||||
|
|
||||||
|
case json.String:
|
||||||
|
// Decode number from string.
|
||||||
|
s := strings.TrimSpace(tok.ParsedString())
|
||||||
|
if len(s) != len(tok.ParsedString()) {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
dec := json.NewDecoder([]byte(s))
|
||||||
|
tok, err := dec.Read()
|
||||||
|
if err != nil {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
return getUint(tok, bitSize)
|
||||||
|
}
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getUint(tok json.Token, bitSize int) (pref.Value, bool) {
|
||||||
|
n, ok := tok.Uint(bitSize)
|
||||||
|
if !ok {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
if bitSize == 32 {
|
||||||
|
return pref.ValueOfUint32(uint32(n)), true
|
||||||
|
}
|
||||||
|
return pref.ValueOfUint64(n), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) {
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.Number:
|
||||||
|
return getFloat(tok, bitSize)
|
||||||
|
|
||||||
|
case json.String:
|
||||||
|
s := tok.ParsedString()
|
||||||
|
switch s {
|
||||||
|
case "NaN":
|
||||||
|
if bitSize == 32 {
|
||||||
|
return pref.ValueOfFloat32(float32(math.NaN())), true
|
||||||
|
}
|
||||||
|
return pref.ValueOfFloat64(math.NaN()), true
|
||||||
|
case "Infinity":
|
||||||
|
if bitSize == 32 {
|
||||||
|
return pref.ValueOfFloat32(float32(math.Inf(+1))), true
|
||||||
|
}
|
||||||
|
return pref.ValueOfFloat64(math.Inf(+1)), true
|
||||||
|
case "-Infinity":
|
||||||
|
if bitSize == 32 {
|
||||||
|
return pref.ValueOfFloat32(float32(math.Inf(-1))), true
|
||||||
|
}
|
||||||
|
return pref.ValueOfFloat64(math.Inf(-1)), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode number from string.
|
||||||
|
if len(s) != len(strings.TrimSpace(s)) {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
dec := json.NewDecoder([]byte(s))
|
||||||
|
tok, err := dec.Read()
|
||||||
|
if err != nil {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
return getFloat(tok, bitSize)
|
||||||
|
}
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFloat(tok json.Token, bitSize int) (pref.Value, bool) {
|
||||||
|
n, ok := tok.Float(bitSize)
|
||||||
|
if !ok {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
if bitSize == 32 {
|
||||||
|
return pref.ValueOfFloat32(float32(n)), true
|
||||||
|
}
|
||||||
|
return pref.ValueOfFloat64(n), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalBytes(tok json.Token) (pref.Value, bool) {
|
||||||
|
if tok.Kind() != json.String {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
s := tok.ParsedString()
|
||||||
|
enc := base64.StdEncoding
|
||||||
|
if strings.ContainsAny(s, "-_") {
|
||||||
|
enc = base64.URLEncoding
|
||||||
|
}
|
||||||
|
if len(s)%4 != 0 {
|
||||||
|
enc = enc.WithPadding(base64.NoPadding)
|
||||||
|
}
|
||||||
|
b, err := enc.DecodeString(s)
|
||||||
|
if err != nil {
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
return pref.ValueOfBytes(b), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) {
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.String:
|
||||||
|
// Lookup EnumNumber based on name.
|
||||||
|
s := tok.ParsedString()
|
||||||
|
if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil {
|
||||||
|
return pref.ValueOfEnum(enumVal.Number()), true
|
||||||
|
}
|
||||||
|
|
||||||
|
case json.Number:
|
||||||
|
if n, ok := tok.Int(32); ok {
|
||||||
|
return pref.ValueOfEnum(pref.EnumNumber(n)), true
|
||||||
|
}
|
||||||
|
|
||||||
|
case json.Null:
|
||||||
|
// This is only valid for google.protobuf.NullValue.
|
||||||
|
if isNullValue(fd) {
|
||||||
|
return pref.ValueOfEnum(0), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return pref.Value{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.ArrayOpen {
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch fd.Kind() {
|
||||||
|
case pref.MessageKind, pref.GroupKind:
|
||||||
|
for {
|
||||||
|
tok, err := d.Peek()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if tok.Kind() == json.ArrayClose {
|
||||||
|
d.Read()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
val := list.NewElement()
|
||||||
|
if err := d.unmarshalMessage(val.Message(), false); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
list.Append(val)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
for {
|
||||||
|
tok, err := d.Peek()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if tok.Kind() == json.ArrayClose {
|
||||||
|
d.Read()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
val, err := d.unmarshalScalar(fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
list.Append(val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.ObjectOpen {
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine ahead whether map entry is a scalar type or a message type in
|
||||||
|
// order to call the appropriate unmarshalMapValue func inside the for loop
|
||||||
|
// below.
|
||||||
|
var unmarshalMapValue func() (pref.Value, error)
|
||||||
|
switch fd.MapValue().Kind() {
|
||||||
|
case pref.MessageKind, pref.GroupKind:
|
||||||
|
unmarshalMapValue = func() (pref.Value, error) {
|
||||||
|
val := mmap.NewValue()
|
||||||
|
if err := d.unmarshalMessage(val.Message(), false); err != nil {
|
||||||
|
return pref.Value{}, err
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
unmarshalMapValue = func() (pref.Value, error) {
|
||||||
|
return d.unmarshalScalar(fd.MapValue())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Loop:
|
||||||
|
for {
|
||||||
|
// Read field name.
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok.Kind() {
|
||||||
|
default:
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
case json.ObjectClose:
|
||||||
|
break Loop
|
||||||
|
case json.Name:
|
||||||
|
// Continue.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal field name.
|
||||||
|
pkey, err := d.unmarshalMapKey(tok, fd.MapKey())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for duplicate field name.
|
||||||
|
if mmap.Has(pkey) {
|
||||||
|
return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read and unmarshal field value.
|
||||||
|
pval, err := unmarshalMapValue()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mmap.Set(pkey, pval)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey.
|
||||||
|
// A map key type is any integral or string type.
|
||||||
|
func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) {
|
||||||
|
const b32 = 32
|
||||||
|
const b64 = 64
|
||||||
|
const base10 = 10
|
||||||
|
|
||||||
|
name := tok.Name()
|
||||||
|
kind := fd.Kind()
|
||||||
|
switch kind {
|
||||||
|
case pref.StringKind:
|
||||||
|
return pref.ValueOfString(name).MapKey(), nil
|
||||||
|
|
||||||
|
case pref.BoolKind:
|
||||||
|
switch name {
|
||||||
|
case "true":
|
||||||
|
return pref.ValueOfBool(true).MapKey(), nil
|
||||||
|
case "false":
|
||||||
|
return pref.ValueOfBool(false).MapKey(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
|
||||||
|
if n, err := strconv.ParseInt(name, base10, b32); err == nil {
|
||||||
|
return pref.ValueOfInt32(int32(n)).MapKey(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
|
||||||
|
if n, err := strconv.ParseInt(name, base10, b64); err == nil {
|
||||||
|
return pref.ValueOfInt64(int64(n)).MapKey(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Uint32Kind, pref.Fixed32Kind:
|
||||||
|
if n, err := strconv.ParseUint(name, base10, b32); err == nil {
|
||||||
|
return pref.ValueOfUint32(uint32(n)).MapKey(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Uint64Kind, pref.Fixed64Kind:
|
||||||
|
if n, err := strconv.ParseUint(name, base10, b64); err == nil {
|
||||||
|
return pref.ValueOfUint64(uint64(n)).MapKey(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid kind for map key: %v", kind))
|
||||||
|
}
|
||||||
|
|
||||||
|
return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
|
||||||
|
}
|
|
@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protojson marshals and unmarshals protocol buffer messages as JSON
+// format. It follows the guide at
+// https://developers.google.com/protocol-buffers/docs/proto3#json.
+//
+// This package produces a different output than the standard "encoding/json"
+// package, which does not operate correctly on protocol buffer messages.
+package protojson

@ -0,0 +1,344 @@
|
||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package protojson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/internal/encoding/json"
|
||||||
|
"google.golang.org/protobuf/internal/encoding/messageset"
|
||||||
|
"google.golang.org/protobuf/internal/errors"
|
||||||
|
"google.golang.org/protobuf/internal/filedesc"
|
||||||
|
"google.golang.org/protobuf/internal/flags"
|
||||||
|
"google.golang.org/protobuf/internal/genid"
|
||||||
|
"google.golang.org/protobuf/internal/order"
|
||||||
|
"google.golang.org/protobuf/internal/pragma"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
pref "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
)
|
||||||
|
|
||||||
|
const defaultIndent = " "
|
||||||
|
|
||||||
|
// Format formats the message as a multiline string.
|
||||||
|
// This function is only intended for human consumption and ignores errors.
|
||||||
|
// Do not depend on the output being stable. It may change over time across
|
||||||
|
// different versions of the program.
|
||||||
|
func Format(m proto.Message) string {
|
||||||
|
return MarshalOptions{Multiline: true}.Format(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal writes the given proto.Message in JSON format using default options.
|
||||||
|
// Do not depend on the output being stable. It may change over time across
|
||||||
|
// different versions of the program.
|
||||||
|
func Marshal(m proto.Message) ([]byte, error) {
|
||||||
|
return MarshalOptions{}.Marshal(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalOptions is a configurable JSON format marshaler.
|
||||||
|
type MarshalOptions struct {
|
||||||
|
pragma.NoUnkeyedLiterals
|
||||||
|
|
||||||
|
// Multiline specifies whether the marshaler should format the output in
|
||||||
|
// indented-form with every textual element on a new line.
|
||||||
|
// If Indent is an empty string, then an arbitrary indent is chosen.
|
||||||
|
Multiline bool
|
||||||
|
|
||||||
|
// Indent specifies the set of indentation characters to use in a multiline
|
||||||
|
// formatted output such that every entry is preceded by Indent and
|
||||||
|
// terminated by a newline. If non-empty, then Multiline is treated as true.
|
||||||
|
// Indent can only be composed of space or tab characters.
|
||||||
|
Indent string
|
||||||
|
|
||||||
|
// AllowPartial allows messages that have missing required fields to marshal
|
||||||
|
// without returning an error. If AllowPartial is false (the default),
|
||||||
|
// Marshal will return error if there are any missing required fields.
|
||||||
|
AllowPartial bool
|
||||||
|
|
||||||
|
// UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
|
||||||
|
// field names.
|
||||||
|
UseProtoNames bool
|
||||||
|
|
||||||
|
// UseEnumNumbers emits enum values as numbers.
|
||||||
|
UseEnumNumbers bool
|
||||||
|
|
||||||
|
// EmitUnpopulated specifies whether to emit unpopulated fields. It does not
|
||||||
|
// emit unpopulated oneof fields or unpopulated extension fields.
|
||||||
|
// The JSON value emitted for unpopulated fields are as follows:
|
||||||
|
// ╔═══════╤════════════════════════════╗
|
||||||
|
// ║ JSON │ Protobuf field ║
|
||||||
|
// ╠═══════╪════════════════════════════╣
|
||||||
|
// ║ false │ proto3 boolean fields ║
|
||||||
|
// ║ 0 │ proto3 numeric fields ║
|
||||||
|
// ║ "" │ proto3 string/bytes fields ║
|
||||||
|
// ║ null │ proto2 scalar fields ║
|
||||||
|
// ║ null │ message fields ║
|
||||||
|
// ║ [] │ list fields ║
|
||||||
|
// ║ {} │ map fields ║
|
||||||
|
// ╚═══════╧════════════════════════════╝
|
||||||
|
EmitUnpopulated bool
|
||||||
|
|
||||||
|
// Resolver is used for looking up types when expanding google.protobuf.Any
|
||||||
|
// messages. If nil, this defaults to using protoregistry.GlobalTypes.
|
||||||
|
Resolver interface {
|
||||||
|
protoregistry.ExtensionTypeResolver
|
||||||
|
protoregistry.MessageTypeResolver
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format formats the message as a string.
|
||||||
|
// This method is only intended for human consumption and ignores errors.
|
||||||
|
// Do not depend on the output being stable. It may change over time across
|
||||||
|
// different versions of the program.
|
||||||
|
func (o MarshalOptions) Format(m proto.Message) string {
|
||||||
|
if m == nil || !m.ProtoReflect().IsValid() {
|
||||||
|
return "<nil>" // invalid syntax, but okay since this is for debugging
|
||||||
|
}
|
||||||
|
o.AllowPartial = true
|
||||||
|
b, _ := o.Marshal(m)
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals the given proto.Message in the JSON format using options in
|
||||||
|
// MarshalOptions. Do not depend on the output being stable. It may change over
|
||||||
|
// time across different versions of the program.
|
||||||
|
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
|
||||||
|
return o.marshal(m)
|
||||||
|
}
|
||||||
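As with Unmarshal, here is a brief hedged usage sketch of the Marshal entry points defined above; structpb is again only a convenient stand-in for any proto.Message, and the option values are illustrative.

package example

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

// printJSON marshals a message with indentation enabled via MarshalOptions.
func printJSON() error {
	msg, err := structpb.NewStruct(map[string]interface{}{"name": "demo", "count": 3})
	if err != nil {
		return err
	}
	out, err := protojson.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(msg)
	if err != nil {
		return err
	}
	fmt.Println(string(out)) // an indented JSON object
	return nil
}
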
|
|
||||||
|
// marshal is a centralized function that all marshal operations go through.
|
||||||
|
// For profiling purposes, avoid changing the name of this function or
|
||||||
|
// introducing other code paths for marshal that do not go through this.
|
||||||
|
func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
|
||||||
|
if o.Multiline && o.Indent == "" {
|
||||||
|
o.Indent = defaultIndent
|
||||||
|
}
|
||||||
|
if o.Resolver == nil {
|
||||||
|
o.Resolver = protoregistry.GlobalTypes
|
||||||
|
}
|
||||||
|
|
||||||
|
internalEnc, err := json.NewEncoder(o.Indent)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Treat nil message interface as an empty message,
|
||||||
|
// in which case the output in an empty JSON object.
|
||||||
|
if m == nil {
|
||||||
|
return []byte("{}"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
enc := encoder{internalEnc, o}
|
||||||
|
if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if o.AllowPartial {
|
||||||
|
return enc.Bytes(), nil
|
||||||
|
}
|
||||||
|
return enc.Bytes(), proto.CheckInitialized(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
type encoder struct {
|
||||||
|
*json.Encoder
|
||||||
|
opts MarshalOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
|
||||||
|
var typeFieldDesc = func() protoreflect.FieldDescriptor {
|
||||||
|
var fd filedesc.Field
|
||||||
|
fd.L0.FullName = "@type"
|
||||||
|
fd.L0.Index = -1
|
||||||
|
fd.L1.Cardinality = protoreflect.Optional
|
||||||
|
fd.L1.Kind = protoreflect.StringKind
|
||||||
|
return &fd
|
||||||
|
}()
|
||||||
|
|
||||||
|
// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
|
||||||
|
// to additionally iterate over a synthetic field for the type URL.
|
||||||
|
type typeURLFieldRanger struct {
|
||||||
|
order.FieldRanger
|
||||||
|
typeURL string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
|
||||||
|
if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.FieldRanger.Range(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
|
||||||
|
// method to additionally iterate over unpopulated fields.
|
||||||
|
type unpopulatedFieldRanger struct{ pref.Message }
|
||||||
|
|
||||||
|
func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
for i := 0; i < fds.Len(); i++ {
|
||||||
|
fd := fds.Get(i)
|
||||||
|
if m.Has(fd) || fd.ContainingOneof() != nil {
|
||||||
|
continue // ignore populated fields and fields within a oneofs
|
||||||
|
}
|
||||||
|
|
||||||
|
v := m.Get(fd)
|
||||||
|
isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid()
|
||||||
|
isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil
|
||||||
|
if isProto2Scalar || isSingularMessage {
|
||||||
|
v = pref.Value{} // use invalid value to emit null
|
||||||
|
}
|
||||||
|
if !f(fd, v) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.Message.Range(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalMessage marshals the fields in the given protoreflect.Message.
|
||||||
|
// If the typeURL is non-empty, then a synthetic "@type" field is injected
|
||||||
|
// containing the URL as the value.
|
||||||
|
func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
|
||||||
|
if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
|
||||||
|
return errors.New("no support for proto1 MessageSets")
|
||||||
|
}
|
||||||
|
|
||||||
|
if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
|
||||||
|
return marshal(e, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
e.StartObject()
|
||||||
|
defer e.EndObject()
|
||||||
|
|
||||||
|
var fields order.FieldRanger = m
|
||||||
|
if e.opts.EmitUnpopulated {
|
||||||
|
fields = unpopulatedFieldRanger{m}
|
||||||
|
}
|
||||||
|
if typeURL != "" {
|
||||||
|
fields = typeURLFieldRanger{fields, typeURL}
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool {
|
||||||
|
name := fd.JSONName()
|
||||||
|
if e.opts.UseProtoNames {
|
||||||
|
name = fd.TextName()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = e.WriteName(name); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if err = e.marshalValue(v, fd); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalValue marshals the given protoreflect.Value.
|
||||||
|
func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error {
|
||||||
|
switch {
|
||||||
|
case fd.IsList():
|
||||||
|
return e.marshalList(val.List(), fd)
|
||||||
|
case fd.IsMap():
|
||||||
|
return e.marshalMap(val.Map(), fd)
|
||||||
|
default:
|
||||||
|
return e.marshalSingular(val, fd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalSingular marshals the given non-repeated field value. This includes
|
||||||
|
// all scalar types, enums, messages, and groups.
|
||||||
|
func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
|
||||||
|
if !val.IsValid() {
|
||||||
|
e.WriteNull()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind := fd.Kind(); kind {
|
||||||
|
case pref.BoolKind:
|
||||||
|
e.WriteBool(val.Bool())
|
||||||
|
|
||||||
|
case pref.StringKind:
|
||||||
|
if e.WriteString(val.String()) != nil {
|
||||||
|
return errors.InvalidUTF8(string(fd.FullName()))
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
|
||||||
|
e.WriteInt(val.Int())
|
||||||
|
|
||||||
|
case pref.Uint32Kind, pref.Fixed32Kind:
|
||||||
|
e.WriteUint(val.Uint())
|
||||||
|
|
||||||
|
case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind,
|
||||||
|
pref.Sfixed64Kind, pref.Fixed64Kind:
|
||||||
|
// 64-bit integers are written out as JSON string.
|
||||||
|
e.WriteString(val.String())
|
||||||
|
|
||||||
|
case pref.FloatKind:
|
||||||
|
// Encoder.WriteFloat handles the special numbers NaN and infinites.
|
||||||
|
e.WriteFloat(val.Float(), 32)
|
||||||
|
|
||||||
|
case pref.DoubleKind:
|
||||||
|
// Encoder.WriteFloat handles the special numbers NaN and infinites.
|
||||||
|
e.WriteFloat(val.Float(), 64)
|
||||||
|
|
||||||
|
case pref.BytesKind:
|
||||||
|
e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
|
||||||
|
|
||||||
|
case pref.EnumKind:
|
||||||
|
if fd.Enum().FullName() == genid.NullValue_enum_fullname {
|
||||||
|
e.WriteNull()
|
||||||
|
} else {
|
||||||
|
desc := fd.Enum().Values().ByNumber(val.Enum())
|
||||||
|
if e.opts.UseEnumNumbers || desc == nil {
|
||||||
|
e.WriteInt(int64(val.Enum()))
|
||||||
|
} else {
|
||||||
|
e.WriteString(string(desc.Name()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case pref.MessageKind, pref.GroupKind:
|
||||||
|
if err := e.marshalMessage(val.Message(), ""); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalList marshals the given protoreflect.List.
|
||||||
|
func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error {
|
||||||
|
e.StartArray()
|
||||||
|
defer e.EndArray()
|
||||||
|
|
||||||
|
for i := 0; i < list.Len(); i++ {
|
||||||
|
item := list.Get(i)
|
||||||
|
if err := e.marshalSingular(item, fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalMap marshals given protoreflect.Map.
|
||||||
|
func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
|
||||||
|
e.StartObject()
|
||||||
|
defer e.EndObject()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool {
|
||||||
|
if err = e.WriteName(k.String()); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if err = e.marshalSingular(v, fd.MapValue()); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
return err
|
||||||
|
}
|
889 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go generated vendored Normal file
|
@ -0,0 +1,889 @@
|
||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package protojson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/internal/encoding/json"
|
||||||
|
"google.golang.org/protobuf/internal/errors"
|
||||||
|
"google.golang.org/protobuf/internal/genid"
|
||||||
|
"google.golang.org/protobuf/internal/strs"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
pref "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
type marshalFunc func(encoder, pref.Message) error
|
||||||
|
|
||||||
|
// wellKnownTypeMarshaler returns a marshal function if the message type
|
||||||
|
// has specialized serialization behavior. It returns nil otherwise.
|
||||||
|
func wellKnownTypeMarshaler(name pref.FullName) marshalFunc {
|
||||||
|
if name.Parent() == genid.GoogleProtobuf_package {
|
||||||
|
switch name.Name() {
|
||||||
|
case genid.Any_message_name:
|
||||||
|
return encoder.marshalAny
|
||||||
|
case genid.Timestamp_message_name:
|
||||||
|
return encoder.marshalTimestamp
|
||||||
|
case genid.Duration_message_name:
|
||||||
|
return encoder.marshalDuration
|
||||||
|
case genid.BoolValue_message_name,
|
||||||
|
genid.Int32Value_message_name,
|
||||||
|
genid.Int64Value_message_name,
|
||||||
|
genid.UInt32Value_message_name,
|
||||||
|
genid.UInt64Value_message_name,
|
||||||
|
genid.FloatValue_message_name,
|
||||||
|
genid.DoubleValue_message_name,
|
||||||
|
genid.StringValue_message_name,
|
||||||
|
genid.BytesValue_message_name:
|
||||||
|
return encoder.marshalWrapperType
|
||||||
|
case genid.Struct_message_name:
|
||||||
|
return encoder.marshalStruct
|
||||||
|
case genid.ListValue_message_name:
|
||||||
|
return encoder.marshalListValue
|
||||||
|
case genid.Value_message_name:
|
||||||
|
return encoder.marshalKnownValue
|
||||||
|
case genid.FieldMask_message_name:
|
||||||
|
return encoder.marshalFieldMask
|
||||||
|
case genid.Empty_message_name:
|
||||||
|
return encoder.marshalEmpty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type unmarshalFunc func(decoder, pref.Message) error
|
||||||
|
|
||||||
|
// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
|
||||||
|
// has specialized serialization behavior. It returns nil otherwise.
|
||||||
|
func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc {
|
||||||
|
if name.Parent() == genid.GoogleProtobuf_package {
|
||||||
|
switch name.Name() {
|
||||||
|
case genid.Any_message_name:
|
||||||
|
return decoder.unmarshalAny
|
||||||
|
case genid.Timestamp_message_name:
|
||||||
|
return decoder.unmarshalTimestamp
|
||||||
|
case genid.Duration_message_name:
|
||||||
|
return decoder.unmarshalDuration
|
||||||
|
case genid.BoolValue_message_name,
|
||||||
|
genid.Int32Value_message_name,
|
||||||
|
genid.Int64Value_message_name,
|
||||||
|
genid.UInt32Value_message_name,
|
||||||
|
genid.UInt64Value_message_name,
|
||||||
|
genid.FloatValue_message_name,
|
||||||
|
genid.DoubleValue_message_name,
|
||||||
|
genid.StringValue_message_name,
|
||||||
|
genid.BytesValue_message_name:
|
||||||
|
return decoder.unmarshalWrapperType
|
||||||
|
case genid.Struct_message_name:
|
||||||
|
return decoder.unmarshalStruct
|
||||||
|
case genid.ListValue_message_name:
|
||||||
|
return decoder.unmarshalListValue
|
||||||
|
case genid.Value_message_name:
|
||||||
|
return decoder.unmarshalKnownValue
|
||||||
|
case genid.FieldMask_message_name:
|
||||||
|
return decoder.unmarshalFieldMask
|
||||||
|
case genid.Empty_message_name:
|
||||||
|
return decoder.unmarshalEmpty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation of an Any message uses the regular representation of
|
||||||
|
// the deserialized, embedded message, with an additional field `@type` which
|
||||||
|
// contains the type URL. If the embedded message type is well-known and has a
|
||||||
|
// custom JSON representation, that representation will be embedded adding a
|
||||||
|
// field `value` which holds the custom JSON in addition to the `@type` field.
|
||||||
|
|
||||||
|
func (e encoder) marshalAny(m pref.Message) error {
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
|
||||||
|
fdValue := fds.ByNumber(genid.Any_Value_field_number)
|
||||||
|
|
||||||
|
if !m.Has(fdType) {
|
||||||
|
if !m.Has(fdValue) {
|
||||||
|
// If message is empty, marshal out empty JSON object.
|
||||||
|
e.StartObject()
|
||||||
|
e.EndObject()
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
// Return error if type_url field is not set, but value is set.
|
||||||
|
return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
typeVal := m.Get(fdType)
|
||||||
|
valueVal := m.Get(fdValue)
|
||||||
|
|
||||||
|
// Resolve the type in order to unmarshal value field.
|
||||||
|
typeURL := typeVal.String()
|
||||||
|
emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
em := emt.New()
|
||||||
|
err = proto.UnmarshalOptions{
|
||||||
|
AllowPartial: true, // never check required fields inside an Any
|
||||||
|
Resolver: e.opts.Resolver,
|
||||||
|
}.Unmarshal(valueVal.Bytes(), em.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If type of value has custom JSON encoding, marshal out a field "value"
|
||||||
|
// with corresponding custom JSON encoding of the embedded message as a
|
||||||
|
// field.
|
||||||
|
if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
|
||||||
|
e.StartObject()
|
||||||
|
defer e.EndObject()
|
||||||
|
|
||||||
|
// Marshal out @type field.
|
||||||
|
e.WriteName("@type")
|
||||||
|
if err := e.WriteString(typeURL); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
e.WriteName("value")
|
||||||
|
return marshal(e, em)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Else, marshal out the embedded message's fields in this Any object.
|
||||||
|
if err := e.marshalMessage(em, typeURL); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalAny(m pref.Message) error {
|
||||||
|
// Peek to check for json.ObjectOpen to avoid advancing a read.
|
||||||
|
start, err := d.Peek()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if start.Kind() != json.ObjectOpen {
|
||||||
|
return d.unexpectedTokenError(start)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use another decoder to parse the unread bytes for @type field. This
|
||||||
|
// avoids advancing a read from current decoder because the current JSON
|
||||||
|
// object may contain the fields of the embedded type.
|
||||||
|
dec := decoder{d.Clone(), UnmarshalOptions{}}
|
||||||
|
tok, err := findTypeURL(dec)
|
||||||
|
switch err {
|
||||||
|
case errEmptyObject:
|
||||||
|
// An empty JSON object translates to an empty Any message.
|
||||||
|
d.Read() // Read json.ObjectOpen.
|
||||||
|
d.Read() // Read json.ObjectClose.
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case errMissingType:
|
||||||
|
if d.opts.DiscardUnknown {
|
||||||
|
// Treat all fields as unknowns, similar to an empty object.
|
||||||
|
return d.skipJSONValue()
|
||||||
|
}
|
||||||
|
// Use start.Pos() for line position.
|
||||||
|
return d.newError(start.Pos(), err.Error())
|
||||||
|
|
||||||
|
default:
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
typeURL := tok.ParsedString()
|
||||||
|
emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create new message for the embedded message type and unmarshal into it.
|
||||||
|
em := emt.New()
|
||||||
|
if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
|
||||||
|
// If embedded message is a custom type,
|
||||||
|
// unmarshal the JSON "value" field into it.
|
||||||
|
if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Else unmarshal the current JSON object into it.
|
||||||
|
if err := d.unmarshalMessage(em, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Serialize the embedded message and assign the resulting bytes to the
|
||||||
|
// proto value field.
|
||||||
|
b, err := proto.MarshalOptions{
|
||||||
|
AllowPartial: true, // No need to check required fields inside an Any.
|
||||||
|
Deterministic: true,
|
||||||
|
}.Marshal(em.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
|
||||||
|
fdValue := fds.ByNumber(genid.Any_Value_field_number)
|
||||||
|
|
||||||
|
m.Set(fdType, pref.ValueOfString(typeURL))
|
||||||
|
m.Set(fdValue, pref.ValueOfBytes(b))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errEmptyObject = fmt.Errorf(`empty object`)
|
||||||
|
var errMissingType = fmt.Errorf(`missing "@type" field`)
|
||||||
|
|
||||||
|
// findTypeURL returns the token for the "@type" field value from the given
|
||||||
|
// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
|
||||||
|
// It returns errEmptyObject if the JSON object is empty or errMissingType if
|
||||||
|
// the @type field does not exist. It returns an error if the @type field is not
|
||||||
|
// valid or if there are other decoding issues.
|
||||||
|
func findTypeURL(d decoder) (json.Token, error) {
|
||||||
|
var typeURL string
|
||||||
|
var typeTok json.Token
|
||||||
|
numFields := 0
|
||||||
|
// Skip start object.
|
||||||
|
d.Read()
|
||||||
|
|
||||||
|
Loop:
|
||||||
|
for {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return json.Token{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.ObjectClose:
|
||||||
|
if typeURL == "" {
|
||||||
|
// Did not find @type field.
|
||||||
|
if numFields > 0 {
|
||||||
|
return json.Token{}, errMissingType
|
||||||
|
}
|
||||||
|
return json.Token{}, errEmptyObject
|
||||||
|
}
|
||||||
|
break Loop
|
||||||
|
|
||||||
|
case json.Name:
|
||||||
|
numFields++
|
||||||
|
if tok.Name() != "@type" {
|
||||||
|
// Skip value.
|
||||||
|
if err := d.skipJSONValue(); err != nil {
|
||||||
|
return json.Token{}, err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return error if this was previously set already.
|
||||||
|
if typeURL != "" {
|
||||||
|
return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
|
||||||
|
}
|
||||||
|
// Read field value.
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return json.Token{}, err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.String {
|
||||||
|
return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
|
||||||
|
}
|
||||||
|
typeURL = tok.ParsedString()
|
||||||
|
if typeURL == "" {
|
||||||
|
return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
|
||||||
|
}
|
||||||
|
typeTok = tok
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return typeTok, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// skipJSONValue parses a JSON value (null, boolean, string, number, object and
|
||||||
|
// array) in order to advance the read to the next JSON value. It relies on
|
||||||
|
// the decoder returning an error if the types are not in valid sequence.
|
||||||
|
func (d decoder) skipJSONValue() error {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Only need to continue reading for objects and arrays.
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.ObjectOpen:
|
||||||
|
for {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.ObjectClose:
|
||||||
|
return nil
|
||||||
|
case json.Name:
|
||||||
|
// Skip object field value.
|
||||||
|
if err := d.skipJSONValue(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case json.ArrayOpen:
|
||||||
|
for {
|
||||||
|
tok, err := d.Peek()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.ArrayClose:
|
||||||
|
d.Read()
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
// Skip array item.
|
||||||
|
if err := d.skipJSONValue(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalAnyValue unmarshals the given custom-type message from the JSON
|
||||||
|
// object's "value" field.
|
||||||
|
func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error {
|
||||||
|
// Skip ObjectOpen, and start reading the fields.
|
||||||
|
d.Read()
|
||||||
|
|
||||||
|
var found bool // Used for detecting duplicate "value".
|
||||||
|
for {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.ObjectClose:
|
||||||
|
if !found {
|
||||||
|
return d.newError(tok.Pos(), `missing "value" field`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case json.Name:
|
||||||
|
switch tok.Name() {
|
||||||
|
case "@type":
|
||||||
|
// Skip the value as this was previously parsed already.
|
||||||
|
d.Read()
|
||||||
|
|
||||||
|
case "value":
|
||||||
|
if found {
|
||||||
|
return d.newError(tok.Pos(), `duplicate "value" field`)
|
||||||
|
}
|
||||||
|
// Unmarshal the field value into the given message.
|
||||||
|
if err := unmarshal(d, m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
found = true
|
||||||
|
|
||||||
|
default:
|
||||||
|
if d.opts.DiscardUnknown {
|
||||||
|
if err := d.skipJSONValue(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wrapper types are encoded as JSON primitives like string, number or boolean.
|
||||||
|
|
||||||
|
func (e encoder) marshalWrapperType(m pref.Message) error {
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
|
||||||
|
val := m.Get(fd)
|
||||||
|
return e.marshalSingular(val, fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalWrapperType(m pref.Message) error {
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
|
||||||
|
val, err := d.unmarshalScalar(fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, val)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation for Empty is an empty JSON object.
|
||||||
|
|
||||||
|
func (e encoder) marshalEmpty(pref.Message) error {
|
||||||
|
e.StartObject()
|
||||||
|
e.EndObject()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalEmpty(pref.Message) error {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.ObjectOpen {
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.ObjectClose:
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case json.Name:
|
||||||
|
if d.opts.DiscardUnknown {
|
||||||
|
if err := d.skipJSONValue(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
|
||||||
|
|
||||||
|
default:
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation for Struct is a JSON object that contains the encoded
|
||||||
|
// Struct.fields map and follows the serialization rules for a map.
|
||||||
|
|
||||||
|
func (e encoder) marshalStruct(m pref.Message) error {
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
|
||||||
|
return e.marshalMap(m.Get(fd).Map(), fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalStruct(m pref.Message) error {
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
|
||||||
|
return d.unmarshalMap(m.Mutable(fd).Map(), fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation for ListValue is a JSON array that contains the encoded
|
||||||
|
// ListValue.values repeated field and follows the serialization rules for a
|
||||||
|
// repeated field.
|
||||||
|
|
||||||
|
func (e encoder) marshalListValue(m pref.Message) error {
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
|
||||||
|
return e.marshalList(m.Get(fd).List(), fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalListValue(m pref.Message) error {
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
|
||||||
|
return d.unmarshalList(m.Mutable(fd).List(), fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation for a Value is dependent on the oneof field that is
|
||||||
|
// set. Each of the fields in the oneof has its own custom serialization rule. A
|
||||||
|
// Value message needs to have a oneof field set, otherwise it is an error.
|
||||||
|
|
||||||
|
func (e encoder) marshalKnownValue(m pref.Message) error {
|
||||||
|
od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
|
||||||
|
fd := m.WhichOneof(od)
|
||||||
|
if fd == nil {
|
||||||
|
return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
|
||||||
|
}
|
||||||
|
if fd.Number() == genid.Value_NumberValue_field_number {
|
||||||
|
if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
|
||||||
|
return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return e.marshalSingular(m.Get(fd), fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalKnownValue(m pref.Message) error {
|
||||||
|
tok, err := d.Peek()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var fd pref.FieldDescriptor
|
||||||
|
var val pref.Value
|
||||||
|
switch tok.Kind() {
|
||||||
|
case json.Null:
|
||||||
|
d.Read()
|
||||||
|
fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
|
||||||
|
val = pref.ValueOfEnum(0)
|
||||||
|
|
||||||
|
case json.Bool:
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
|
||||||
|
val = pref.ValueOfBool(tok.Bool())
|
||||||
|
|
||||||
|
case json.Number:
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
|
||||||
|
var ok bool
|
||||||
|
val, ok = unmarshalFloat(tok, 64)
|
||||||
|
if !ok {
|
||||||
|
return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
case json.String:
|
||||||
|
// A JSON string may have been encoded from the number_value field,
|
||||||
|
// e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
|
||||||
|
// for it to be in JSON string form. Given this custom encoding spec,
|
||||||
|
// however, there is no way to identify that and hence a JSON string is
|
||||||
|
// always assigned to the string_value field, which means that certain
|
||||||
|
// encodings cannot be parsed back to the same field.
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
|
||||||
|
val = pref.ValueOfString(tok.ParsedString())
|
||||||
|
|
||||||
|
case json.ObjectOpen:
|
||||||
|
fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
|
||||||
|
val = m.NewField(fd)
|
||||||
|
if err := d.unmarshalStruct(val.Message()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
case json.ArrayOpen:
|
||||||
|
fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
|
||||||
|
val = m.NewField(fd)
|
||||||
|
if err := d.unmarshalListValue(val.Message()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Set(fd, val)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation for a Duration is a JSON string that ends in the
|
||||||
|
// suffix "s" (indicating seconds) and is preceded by the number of seconds,
|
||||||
|
// with nanoseconds expressed as fractional seconds.
|
||||||
|
//
|
||||||
|
// Durations less than one second are represented with a 0 seconds field and a
|
||||||
|
// positive or negative nanos field. For durations of one second or more, a
|
||||||
|
// non-zero value for the nanos field must be of the same sign as the seconds
|
||||||
|
// field.
|
||||||
|
//
|
||||||
|
// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
|
||||||
|
// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.
|
||||||
|
|
||||||
|
const (
|
||||||
|
secondsInNanos = 999999999
|
||||||
|
maxSecondsInDuration = 315576000000
|
||||||
|
)
|
||||||
|
|
||||||
|
func (e encoder) marshalDuration(m pref.Message) error {
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
|
||||||
|
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
|
||||||
|
|
||||||
|
secsVal := m.Get(fdSeconds)
|
||||||
|
nanosVal := m.Get(fdNanos)
|
||||||
|
secs := secsVal.Int()
|
||||||
|
nanos := nanosVal.Int()
|
||||||
|
if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
|
||||||
|
return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs)
|
||||||
|
}
|
||||||
|
if nanos < -secondsInNanos || nanos > secondsInNanos {
|
||||||
|
return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos)
|
||||||
|
}
|
||||||
|
if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
|
||||||
|
return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname)
|
||||||
|
}
|
||||||
|
// Generated output always contains 0, 3, 6, or 9 fractional digits,
|
||||||
|
// depending on required precision, followed by the suffix "s".
|
||||||
|
var sign string
|
||||||
|
if secs < 0 || nanos < 0 {
|
||||||
|
sign, secs, nanos = "-", -1*secs, -1*nanos
|
||||||
|
}
|
||||||
|
x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, ".000")
|
||||||
|
e.WriteString(x + "s")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalDuration(m pref.Message) error {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.String {
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
|
||||||
|
secs, nanos, ok := parseDuration(tok.ParsedString())
|
||||||
|
if !ok {
|
||||||
|
return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString())
|
||||||
|
}
|
||||||
|
// Validate seconds. No need to validate nanos because parseDuration would
|
||||||
|
// have covered that already.
|
||||||
|
if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
|
||||||
|
return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
|
||||||
|
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
|
||||||
|
|
||||||
|
m.Set(fdSeconds, pref.ValueOfInt64(secs))
|
||||||
|
m.Set(fdNanos, pref.ValueOfInt32(nanos))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDuration parses the given input string for seconds and nanoseconds value
|
||||||
|
// for the Duration JSON format. The format is a decimal number with a suffix
|
||||||
|
// 's'. It can have an optional plus/minus sign. There needs to be at least an
|
||||||
|
// integer or fractional part. Fractional part is limited to 9 digits only for
|
||||||
|
// nanoseconds precision, regardless of whether there are trailing zero digits.
|
||||||
|
// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s.
|
||||||
|
func parseDuration(input string) (int64, int32, bool) {
|
||||||
|
b := []byte(input)
|
||||||
|
size := len(b)
|
||||||
|
if size < 2 {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
if b[size-1] != 's' {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
b = b[:size-1]
|
||||||
|
|
||||||
|
// Read optional plus/minus symbol.
|
||||||
|
var neg bool
|
||||||
|
switch b[0] {
|
||||||
|
case '-':
|
||||||
|
neg = true
|
||||||
|
b = b[1:]
|
||||||
|
case '+':
|
||||||
|
b = b[1:]
|
||||||
|
}
|
||||||
|
if len(b) == 0 {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the integer part.
|
||||||
|
var intp []byte
|
||||||
|
switch {
|
||||||
|
case b[0] == '0':
|
||||||
|
b = b[1:]
|
||||||
|
|
||||||
|
case '1' <= b[0] && b[0] <= '9':
|
||||||
|
intp = b[0:]
|
||||||
|
b = b[1:]
|
||||||
|
n := 1
|
||||||
|
for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
|
||||||
|
n++
|
||||||
|
b = b[1:]
|
||||||
|
}
|
||||||
|
intp = intp[:n]
|
||||||
|
|
||||||
|
case b[0] == '.':
|
||||||
|
// Continue below.
|
||||||
|
|
||||||
|
default:
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
hasFrac := false
|
||||||
|
var frac [9]byte
|
||||||
|
if len(b) > 0 {
|
||||||
|
if b[0] != '.' {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
// Read the fractional part.
|
||||||
|
b = b[1:]
|
||||||
|
n := 0
|
||||||
|
for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' {
|
||||||
|
frac[n] = b[0]
|
||||||
|
n++
|
||||||
|
b = b[1:]
|
||||||
|
}
|
||||||
|
// It is not valid if there are more bytes left.
|
||||||
|
if len(b) > 0 {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
// Pad fractional part with 0s.
|
||||||
|
for i := n; i < 9; i++ {
|
||||||
|
frac[i] = '0'
|
||||||
|
}
|
||||||
|
hasFrac = true
|
||||||
|
}
|
||||||
|
|
||||||
|
var secs int64
|
||||||
|
if len(intp) > 0 {
|
||||||
|
var err error
|
||||||
|
secs, err = strconv.ParseInt(string(intp), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var nanos int64
|
||||||
|
if hasFrac {
|
||||||
|
nanob := bytes.TrimLeft(frac[:], "0")
|
||||||
|
if len(nanob) > 0 {
|
||||||
|
var err error
|
||||||
|
nanos, err = strconv.ParseInt(string(nanob), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if neg {
|
||||||
|
if secs > 0 {
|
||||||
|
secs = -secs
|
||||||
|
}
|
||||||
|
if nanos > 0 {
|
||||||
|
nanos = -nanos
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return secs, int32(nanos), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation for a Timestamp is a JSON string in the RFC 3339
|
||||||
|
// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
|
||||||
|
// {year} is always expressed using four digits while {month}, {day}, {hour},
|
||||||
|
// {min}, and {sec} are zero-padded to two digits each. The fractional seconds,
|
||||||
|
// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The
|
||||||
|
// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding
|
||||||
|
// should always use UTC (as indicated by "Z") and a decoder should be able to
|
||||||
|
// accept both UTC and other timezones (as indicated by an offset).
|
||||||
|
//
|
||||||
|
// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
|
||||||
|
// inclusive.
|
||||||
|
// Timestamp.nanos must be from 0 to 999,999,999 inclusive.
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxTimestampSeconds = 253402300799
|
||||||
|
minTimestampSeconds = -62135596800
|
||||||
|
)
|
||||||
|
|
||||||
|
func (e encoder) marshalTimestamp(m pref.Message) error {
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
|
||||||
|
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
|
||||||
|
|
||||||
|
secsVal := m.Get(fdSeconds)
|
||||||
|
nanosVal := m.Get(fdNanos)
|
||||||
|
secs := secsVal.Int()
|
||||||
|
nanos := nanosVal.Int()
|
||||||
|
if secs < minTimestampSeconds || secs > maxTimestampSeconds {
|
||||||
|
return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
|
||||||
|
}
|
||||||
|
if nanos < 0 || nanos > secondsInNanos {
|
||||||
|
return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
|
||||||
|
}
|
||||||
|
// Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
|
||||||
|
// 6 or 9 fractional digits.
|
||||||
|
t := time.Unix(secs, nanos).UTC()
|
||||||
|
x := t.Format("2006-01-02T15:04:05.000000000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, ".000")
|
||||||
|
e.WriteString(x + "Z")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalTimestamp(m pref.Message) error {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.String {
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
|
||||||
|
t, err := time.Parse(time.RFC3339Nano, tok.ParsedString())
|
||||||
|
if err != nil {
|
||||||
|
return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
|
||||||
|
}
|
||||||
|
// Validate seconds. No need to validate nanos because time.Parse would have
|
||||||
|
// covered that already.
|
||||||
|
secs := t.Unix()
|
||||||
|
if secs < minTimestampSeconds || secs > maxTimestampSeconds {
|
||||||
|
return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
|
||||||
|
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
|
||||||
|
|
||||||
|
m.Set(fdSeconds, pref.ValueOfInt64(secs))
|
||||||
|
m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond())))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The JSON representation for a FieldMask is a JSON string where paths are
|
||||||
|
// separated by a comma. Field names in each path are converted to/from
|
||||||
|
// lower-camel naming conventions. Encoding should fail if the path name would
|
||||||
|
// end up differently after a round-trip.
|
||||||
|
|
||||||
|
func (e encoder) marshalFieldMask(m pref.Message) error {
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
|
||||||
|
list := m.Get(fd).List()
|
||||||
|
paths := make([]string, 0, list.Len())
|
||||||
|
|
||||||
|
for i := 0; i < list.Len(); i++ {
|
||||||
|
s := list.Get(i).String()
|
||||||
|
if !pref.FullName(s).IsValid() {
|
||||||
|
return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
|
||||||
|
}
|
||||||
|
// Return error if conversion to camelCase is not reversible.
|
||||||
|
cc := strs.JSONCamelCase(s)
|
||||||
|
if s != strs.JSONSnakeCase(cc) {
|
||||||
|
return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
|
||||||
|
}
|
||||||
|
paths = append(paths, cc)
|
||||||
|
}
|
||||||
|
|
||||||
|
e.WriteString(strings.Join(paths, ","))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d decoder) unmarshalFieldMask(m pref.Message) error {
|
||||||
|
tok, err := d.Read()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tok.Kind() != json.String {
|
||||||
|
return d.unexpectedTokenError(tok)
|
||||||
|
}
|
||||||
|
str := strings.TrimSpace(tok.ParsedString())
|
||||||
|
if str == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
paths := strings.Split(str, ",")
|
||||||
|
|
||||||
|
fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
|
||||||
|
list := m.Mutable(fd).List()
|
||||||
|
|
||||||
|
for _, s0 := range paths {
|
||||||
|
s := strs.JSONSnakeCase(s0)
|
||||||
|
if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() {
|
||||||
|
return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
|
||||||
|
}
|
||||||
|
list.Append(pref.ValueOfString(s))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
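Note: the Duration and Timestamp rules described in the comments above (suffix "s", Z-normalized RFC 3339, 0/3/6/9 fractional digits) can be exercised through the public API. A minimal sketch, assuming the durationpb/timestamppb helper constructors for illustration; the expected outputs follow from marshalDuration and marshalTimestamp above:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// marshalDuration trims to 0, 3, 6 or 9 fractional digits and appends "s".
	d, _ := protojson.Marshal(durationpb.New(1500 * time.Millisecond))
	fmt.Println(string(d)) // expected: "1.500s"

	// marshalTimestamp emits Z-normalized RFC 3339 with the same trimming rule.
	t, _ := protojson.Marshal(timestamppb.New(time.Unix(0, 0).UTC()))
	fmt.Println(string(t)) // expected: "1970-01-01T00:00:00Z"
}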
|
@ -25,6 +25,7 @@ const (
|
||||||
FirstReservedNumber Number = 19000
|
FirstReservedNumber Number = 19000
|
||||||
LastReservedNumber Number = 19999
|
LastReservedNumber Number = 19999
|
||||||
MaxValidNumber Number = 1<<29 - 1
|
MaxValidNumber Number = 1<<29 - 1
|
||||||
|
DefaultRecursionLimit = 10000
|
||||||
)
|
)
|
||||||
|
|
||||||
// IsValid reports whether the field number is semantically valid.
|
// IsValid reports whether the field number is semantically valid.
|
||||||
|
@ -55,6 +56,7 @@ const (
|
||||||
errCodeOverflow
|
errCodeOverflow
|
||||||
errCodeReserved
|
errCodeReserved
|
||||||
errCodeEndGroup
|
errCodeEndGroup
|
||||||
|
errCodeRecursionDepth
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) {
|
||||||
// When parsing a group, the length includes the end group marker and
|
// When parsing a group, the length includes the end group marker and
|
||||||
// the end group is verified to match the starting field number.
|
// the end group is verified to match the starting field number.
|
||||||
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
|
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
|
||||||
|
return consumeFieldValueD(num, typ, b, DefaultRecursionLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) {
|
||||||
switch typ {
|
switch typ {
|
||||||
case VarintType:
|
case VarintType:
|
||||||
_, n = ConsumeVarint(b)
|
_, n = ConsumeVarint(b)
|
||||||
|
@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
|
||||||
_, n = ConsumeBytes(b)
|
_, n = ConsumeBytes(b)
|
||||||
return n
|
return n
|
||||||
case StartGroupType:
|
case StartGroupType:
|
||||||
|
if depth < 0 {
|
||||||
|
return errCodeRecursionDepth
|
||||||
|
}
|
||||||
n0 := len(b)
|
n0 := len(b)
|
||||||
for {
|
for {
|
||||||
num2, typ2, n := ConsumeTag(b)
|
num2, typ2, n := ConsumeTag(b)
|
||||||
|
@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
|
||||||
return n0 - len(b)
|
return n0 - len(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
n = ConsumeFieldValue(num2, typ2, b)
|
n = consumeFieldValueD(num2, typ2, b, depth-1)
|
||||||
if n < 0 {
|
if n < 0 {
|
||||||
return n // forward error code
|
return n // forward error code
|
||||||
}
|
}
|
||||||
|
|
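Note: the protowire hunks above are the substantive change pulled in by this bump: ConsumeFieldValue now delegates to consumeFieldValueD with DefaultRecursionLimit, and group parsing decrements that budget so pathologically nested groups return errCodeRecursionDepth instead of exhausting the stack. A standalone sketch of the same guard pattern (illustrative only, not the library's code):

package main

import (
	"errors"
	"fmt"
)

var errRecursionDepth = errors.New("exceeded maximum recursion depth")

// parseNested consumes balanced "(...)" groups and fails once nesting
// exceeds the depth budget, mirroring consumeFieldValueD passing depth-1
// to itself for every StartGroupType it encounters.
func parseNested(s string, depth int) (int, error) {
	if depth < 0 {
		return 0, errRecursionDepth
	}
	i := 0
	for i < len(s) {
		switch s[i] {
		case '(':
			n, err := parseNested(s[i+1:], depth-1)
			if err != nil {
				return 0, err
			}
			i += n + 1
		case ')':
			return i + 1, nil
		default:
			i++
		}
	}
	return i, nil
}

func main() {
	_, err := parseNested("((()))", 2)
	fmt.Println(err) // expected: exceeded maximum recursion depth
}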
340
vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
generated
vendored
Normal file
|
@ -0,0 +1,340 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/internal/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// call specifies which Decoder method was invoked.
|
||||||
|
type call uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
readCall call = iota
|
||||||
|
peekCall
|
||||||
|
)
|
||||||
|
|
||||||
|
const unexpectedFmt = "unexpected token %s"
|
||||||
|
|
||||||
|
// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
|
||||||
|
var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
|
||||||
|
|
||||||
|
// Decoder is a token-based JSON decoder.
|
||||||
|
type Decoder struct {
|
||||||
|
// lastCall is last method called, either readCall or peekCall.
|
||||||
|
// Initial value is readCall.
|
||||||
|
lastCall call
|
||||||
|
|
||||||
|
// lastToken contains the last read token.
|
||||||
|
lastToken Token
|
||||||
|
|
||||||
|
// lastErr contains the last read error.
|
||||||
|
lastErr error
|
||||||
|
|
||||||
|
// openStack is a stack containing ObjectOpen and ArrayOpen values. The
|
||||||
|
// top of stack represents the object or the array the current value is
|
||||||
|
// directly located in.
|
||||||
|
openStack []Kind
|
||||||
|
|
||||||
|
// orig is used in reporting line and column.
|
||||||
|
orig []byte
|
||||||
|
// in contains the unconsumed input.
|
||||||
|
in []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a Decoder to read the given []byte.
|
||||||
|
func NewDecoder(b []byte) *Decoder {
|
||||||
|
return &Decoder{orig: b, in: b}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peek looks ahead and returns the next token kind without advancing a read.
|
||||||
|
func (d *Decoder) Peek() (Token, error) {
|
||||||
|
defer func() { d.lastCall = peekCall }()
|
||||||
|
if d.lastCall == readCall {
|
||||||
|
d.lastToken, d.lastErr = d.Read()
|
||||||
|
}
|
||||||
|
return d.lastToken, d.lastErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read returns the next JSON token.
|
||||||
|
// It will return an error if there is no valid token.
|
||||||
|
func (d *Decoder) Read() (Token, error) {
|
||||||
|
const scalar = Null | Bool | Number | String
|
||||||
|
|
||||||
|
defer func() { d.lastCall = readCall }()
|
||||||
|
if d.lastCall == peekCall {
|
||||||
|
return d.lastToken, d.lastErr
|
||||||
|
}
|
||||||
|
|
||||||
|
tok, err := d.parseNext()
|
||||||
|
if err != nil {
|
||||||
|
return Token{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tok.kind {
|
||||||
|
case EOF:
|
||||||
|
if len(d.openStack) != 0 ||
|
||||||
|
d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 {
|
||||||
|
return Token{}, ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
case Null:
|
||||||
|
if !d.isValueNext() {
|
||||||
|
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
case Bool, Number:
|
||||||
|
if !d.isValueNext() {
|
||||||
|
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||||
|
}
|
||||||
|
|
||||||
|
case String:
|
||||||
|
if d.isValueNext() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// This string token should only be for a field name.
|
||||||
|
if d.lastToken.kind&(ObjectOpen|comma) == 0 {
|
||||||
|
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||||
|
}
|
||||||
|
if len(d.in) == 0 {
|
||||||
|
return Token{}, ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if c := d.in[0]; c != ':' {
|
||||||
|
return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c))
|
||||||
|
}
|
||||||
|
tok.kind = Name
|
||||||
|
d.consume(1)
|
||||||
|
|
||||||
|
case ObjectOpen, ArrayOpen:
|
||||||
|
if !d.isValueNext() {
|
||||||
|
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||||
|
}
|
||||||
|
d.openStack = append(d.openStack, tok.kind)
|
||||||
|
|
||||||
|
case ObjectClose:
|
||||||
|
if len(d.openStack) == 0 ||
|
||||||
|
d.lastToken.kind == comma ||
|
||||||
|
d.openStack[len(d.openStack)-1] != ObjectOpen {
|
||||||
|
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||||
|
}
|
||||||
|
d.openStack = d.openStack[:len(d.openStack)-1]
|
||||||
|
|
||||||
|
case ArrayClose:
|
||||||
|
if len(d.openStack) == 0 ||
|
||||||
|
d.lastToken.kind == comma ||
|
||||||
|
d.openStack[len(d.openStack)-1] != ArrayOpen {
|
||||||
|
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||||
|
}
|
||||||
|
d.openStack = d.openStack[:len(d.openStack)-1]
|
||||||
|
|
||||||
|
case comma:
|
||||||
|
if len(d.openStack) == 0 ||
|
||||||
|
d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
|
||||||
|
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update d.lastToken only after validating token to be in the right sequence.
|
||||||
|
d.lastToken = tok
|
||||||
|
|
||||||
|
if d.lastToken.kind == comma {
|
||||||
|
return d.Read()
|
||||||
|
}
|
||||||
|
return tok, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Any sequence that looks like a non-delimiter (for error reporting).
|
||||||
|
var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`)
|
||||||
|
|
||||||
|
// parseNext parses for the next JSON token. It returns a Token object for
|
||||||
|
// different types, except for Name. It does not handle whether the next token
|
||||||
|
// is in a valid sequence or not.
|
||||||
|
func (d *Decoder) parseNext() (Token, error) {
|
||||||
|
// Trim leading spaces.
|
||||||
|
d.consume(0)
|
||||||
|
|
||||||
|
in := d.in
|
||||||
|
if len(in) == 0 {
|
||||||
|
return d.consumeToken(EOF, 0), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch in[0] {
|
||||||
|
case 'n':
|
||||||
|
if n := matchWithDelim("null", in); n != 0 {
|
||||||
|
return d.consumeToken(Null, n), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case 't':
|
||||||
|
if n := matchWithDelim("true", in); n != 0 {
|
||||||
|
return d.consumeBoolToken(true, n), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'f':
|
||||||
|
if n := matchWithDelim("false", in); n != 0 {
|
||||||
|
return d.consumeBoolToken(false, n), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||||
|
if n, ok := parseNumber(in); ok {
|
||||||
|
return d.consumeToken(Number, n), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case '"':
|
||||||
|
s, n, err := d.parseString(in)
|
||||||
|
if err != nil {
|
||||||
|
return Token{}, err
|
||||||
|
}
|
||||||
|
return d.consumeStringToken(s, n), nil
|
||||||
|
|
||||||
|
case '{':
|
||||||
|
return d.consumeToken(ObjectOpen, 1), nil
|
||||||
|
|
||||||
|
case '}':
|
||||||
|
return d.consumeToken(ObjectClose, 1), nil
|
||||||
|
|
||||||
|
case '[':
|
||||||
|
return d.consumeToken(ArrayOpen, 1), nil
|
||||||
|
|
||||||
|
case ']':
|
||||||
|
return d.consumeToken(ArrayClose, 1), nil
|
||||||
|
|
||||||
|
case ',':
|
||||||
|
return d.consumeToken(comma, 1), nil
|
||||||
|
}
|
||||||
|
return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSyntaxError returns an error with line and column information useful for
|
||||||
|
// syntax errors.
|
||||||
|
func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
|
||||||
|
e := errors.New(f, x...)
|
||||||
|
line, column := d.Position(pos)
|
||||||
|
return errors.New("syntax error (line %d:%d): %v", line, column, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Position returns line and column number of given index of the original input.
|
||||||
|
// It will panic if index is out of range.
|
||||||
|
func (d *Decoder) Position(idx int) (line int, column int) {
|
||||||
|
b := d.orig[:idx]
|
||||||
|
line = bytes.Count(b, []byte("\n")) + 1
|
||||||
|
if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
|
||||||
|
b = b[i+1:]
|
||||||
|
}
|
||||||
|
column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
|
||||||
|
return line, column
|
||||||
|
}
|
||||||
|
|
||||||
|
// currPos returns the current index position of d.in from d.orig.
|
||||||
|
func (d *Decoder) currPos() int {
|
||||||
|
return len(d.orig) - len(d.in)
|
||||||
|
}
|
||||||
|
|
||||||
|
// matchWithDelim matches s with the input b and verifies that the match
|
||||||
|
// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
|
||||||
|
// As a special case, EOF is considered a delimiter. It returns the length of s
|
||||||
|
// if there is a match, else 0.
|
||||||
|
func matchWithDelim(s string, b []byte) int {
|
||||||
|
if !bytes.HasPrefix(b, []byte(s)) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
n := len(s)
|
||||||
|
if n < len(b) && isNotDelim(b[n]) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// isNotDelim returns true if the given byte is not a delimiter character.
|
||||||
|
func isNotDelim(c byte) bool {
|
||||||
|
return (c == '-' || c == '+' || c == '.' || c == '_' ||
|
||||||
|
('a' <= c && c <= 'z') ||
|
||||||
|
('A' <= c && c <= 'Z') ||
|
||||||
|
('0' <= c && c <= '9'))
|
||||||
|
}
|
||||||
|
|
||||||
|
// consume consumes n bytes of input and any subsequent whitespace.
|
||||||
|
func (d *Decoder) consume(n int) {
|
||||||
|
d.in = d.in[n:]
|
||||||
|
for len(d.in) > 0 {
|
||||||
|
switch d.in[0] {
|
||||||
|
case ' ', '\n', '\r', '\t':
|
||||||
|
d.in = d.in[1:]
|
||||||
|
default:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isValueNext returns true if next type should be a JSON value: Null,
|
||||||
|
// Number, String or Bool.
|
||||||
|
func (d *Decoder) isValueNext() bool {
|
||||||
|
if len(d.openStack) == 0 {
|
||||||
|
return d.lastToken.kind == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
start := d.openStack[len(d.openStack)-1]
|
||||||
|
switch start {
|
||||||
|
case ObjectOpen:
|
||||||
|
return d.lastToken.kind&Name != 0
|
||||||
|
case ArrayOpen:
|
||||||
|
return d.lastToken.kind&(ArrayOpen|comma) != 0
|
||||||
|
}
|
||||||
|
panic(fmt.Sprintf(
|
||||||
|
"unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
|
||||||
|
d.lastToken.kind, start))
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeToken constructs a Token for given Kind with raw value derived from
|
||||||
|
// current d.in and given size, and consumes the given size-length of it.
|
||||||
|
func (d *Decoder) consumeToken(kind Kind, size int) Token {
|
||||||
|
tok := Token{
|
||||||
|
kind: kind,
|
||||||
|
raw: d.in[:size],
|
||||||
|
pos: len(d.orig) - len(d.in),
|
||||||
|
}
|
||||||
|
d.consume(size)
|
||||||
|
return tok
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
|
||||||
|
// current d.in and given size.
|
||||||
|
func (d *Decoder) consumeBoolToken(b bool, size int) Token {
|
||||||
|
tok := Token{
|
||||||
|
kind: Bool,
|
||||||
|
raw: d.in[:size],
|
||||||
|
pos: len(d.orig) - len(d.in),
|
||||||
|
boo: b,
|
||||||
|
}
|
||||||
|
d.consume(size)
|
||||||
|
return tok
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeStringToken constructs a Token for a String kind with raw value derived
|
||||||
|
// from current d.in and given size.
|
||||||
|
func (d *Decoder) consumeStringToken(s string, size int) Token {
|
||||||
|
tok := Token{
|
||||||
|
kind: String,
|
||||||
|
raw: d.in[:size],
|
||||||
|
pos: len(d.orig) - len(d.in),
|
||||||
|
str: s,
|
||||||
|
}
|
||||||
|
d.consume(size)
|
||||||
|
return tok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a copy of the Decoder for use in reading ahead the next JSON
|
||||||
|
// object, array or other values without affecting current Decoder.
|
||||||
|
func (d *Decoder) Clone() *Decoder {
|
||||||
|
ret := *d
|
||||||
|
ret.openStack = append([]Kind(nil), ret.openStack...)
|
||||||
|
return &ret
|
||||||
|
}
|
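Note: this token decoder lives in an internal package and cannot be imported directly; it is driven by the protojson decoder earlier in this diff, whose behavior is controlled through the exported UnmarshalOptions. A minimal sketch of the DiscardUnknown knob that several of the unmarshal functions above consult (the JSON literal is an assumption for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	var m emptypb.Empty

	// unmarshalEmpty rejects unknown fields by default ...
	err := protojson.Unmarshal([]byte(`{"extra": 1}`), &m)
	fmt.Println(err != nil) // expected: true

	// ... but skips them when DiscardUnknown is set.
	err = protojson.UnmarshalOptions{DiscardUnknown: true}.Unmarshal([]byte(`{"extra": 1}`), &m)
	fmt.Println(err) // expected: <nil>
}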
254
vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
generated
vendored
Normal file
|
@ -0,0 +1,254 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// parseNumber reads the given []byte for a valid JSON number. If it is valid,
|
||||||
|
// it returns the number of bytes. Parsing logic follows the definition in
|
||||||
|
// https://tools.ietf.org/html/rfc7159#section-6, and is based on the
|
||||||
|
// encoding/json.isValidNumber function.
|
||||||
|
func parseNumber(input []byte) (int, bool) {
|
||||||
|
var n int
|
||||||
|
|
||||||
|
s := input
|
||||||
|
if len(s) == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Optional -
|
||||||
|
if s[0] == '-' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
if len(s) == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Digits
|
||||||
|
switch {
|
||||||
|
case s[0] == '0':
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
|
||||||
|
case '1' <= s[0] && s[0] <= '9':
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// . followed by 1 or more digits.
|
||||||
|
if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
|
||||||
|
s = s[2:]
|
||||||
|
n += 2
|
||||||
|
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// e or E followed by an optional - or + and
|
||||||
|
// 1 or more digits.
|
||||||
|
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
if s[0] == '+' || s[0] == '-' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
if len(s) == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that next byte is a delimiter or it is at the end.
|
||||||
|
if n < len(input) && isNotDelim(input[n]) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// numberParts is the result of parsing out a valid JSON number. It contains
|
||||||
|
// the parts of a number. The parts are used for integer conversion.
|
||||||
|
type numberParts struct {
|
||||||
|
neg bool
|
||||||
|
intp []byte
|
||||||
|
frac []byte
|
||||||
|
exp []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseNumberParts constructs numberParts from the given []byte. The logic here is
|
||||||
|
// similar to parseNumber above with the difference of having to construct
|
||||||
|
// numberParts. The slice fields in numberParts are subslices of the input.
|
||||||
|
func parseNumberParts(input []byte) (numberParts, bool) {
|
||||||
|
var neg bool
|
||||||
|
var intp []byte
|
||||||
|
var frac []byte
|
||||||
|
var exp []byte
|
||||||
|
|
||||||
|
s := input
|
||||||
|
if len(s) == 0 {
|
||||||
|
return numberParts{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Optional -
|
||||||
|
if s[0] == '-' {
|
||||||
|
neg = true
|
||||||
|
s = s[1:]
|
||||||
|
if len(s) == 0 {
|
||||||
|
return numberParts{}, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Digits
|
||||||
|
switch {
|
||||||
|
case s[0] == '0':
|
||||||
|
// Skip first 0 and no need to store.
|
||||||
|
s = s[1:]
|
||||||
|
|
||||||
|
case '1' <= s[0] && s[0] <= '9':
|
||||||
|
intp = s
|
||||||
|
n := 1
|
||||||
|
s = s[1:]
|
||||||
|
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
intp = intp[:n]
|
||||||
|
|
||||||
|
default:
|
||||||
|
return numberParts{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// . followed by 1 or more digits.
|
||||||
|
if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
|
||||||
|
frac = s[1:]
|
||||||
|
n := 1
|
||||||
|
s = s[2:]
|
||||||
|
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
frac = frac[:n]
|
||||||
|
}
|
||||||
|
|
||||||
|
// e or E followed by an optional - or + and
|
||||||
|
// 1 or more digits.
|
||||||
|
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
|
||||||
|
s = s[1:]
|
||||||
|
exp = s
|
||||||
|
n := 0
|
||||||
|
if s[0] == '+' || s[0] == '-' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
if len(s) == 0 {
|
||||||
|
return numberParts{}, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||||
|
s = s[1:]
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
exp = exp[:n]
|
||||||
|
}
|
||||||
|
|
||||||
|
return numberParts{
|
||||||
|
neg: neg,
|
||||||
|
intp: intp,
|
||||||
|
frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
|
||||||
|
exp: exp,
|
||||||
|
}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeToIntString returns an integer string in normal form without the
|
||||||
|
// E-notation for given numberParts. It will return false if it is not an
|
||||||
|
// integer or if the exponent exceeds the max/min int value.
|
||||||
|
func normalizeToIntString(n numberParts) (string, bool) {
|
||||||
|
intpSize := len(n.intp)
|
||||||
|
fracSize := len(n.frac)
|
||||||
|
|
||||||
|
if intpSize == 0 && fracSize == 0 {
|
||||||
|
return "0", true
|
||||||
|
}
|
||||||
|
|
||||||
|
var exp int
|
||||||
|
if len(n.exp) > 0 {
|
||||||
|
i, err := strconv.ParseInt(string(n.exp), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
exp = int(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
var num []byte
|
||||||
|
if exp >= 0 {
|
||||||
|
// For positive E, shift fraction digits into integer part and also pad
|
||||||
|
// with zeroes as needed.
|
||||||
|
|
||||||
|
// If there are more digits in fraction than the E value, then the
|
||||||
|
// number is not an integer.
|
||||||
|
if fracSize > exp {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure resulting digits are within max value limit to avoid
|
||||||
|
// unnecessarily constructing a large byte slice that may simply fail
|
||||||
|
// later on.
|
||||||
|
const maxDigits = 20 // Max uint64 value has 20 decimal digits.
|
||||||
|
if intpSize+exp > maxDigits {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set cap to make a copy of integer part when appended.
|
||||||
|
num = n.intp[:len(n.intp):len(n.intp)]
|
||||||
|
num = append(num, n.frac...)
|
||||||
|
for i := 0; i < exp-fracSize; i++ {
|
||||||
|
num = append(num, '0')
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// For negative E, shift digits in integer part out.
|
||||||
|
|
||||||
|
// If there are fractions, then the number is not an integer.
|
||||||
|
if fracSize > 0 {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// index is where the decimal point will be after adjusting for negative
|
||||||
|
// exponent.
|
||||||
|
index := intpSize + exp
|
||||||
|
if index < 0 {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
num = n.intp
|
||||||
|
// If any of the digits being shifted to the right of the decimal point
|
||||||
|
// is non-zero, then the number is not an integer.
|
||||||
|
for i := index; i < intpSize; i++ {
|
||||||
|
if num[i] != '0' {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
num = num[:index]
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.neg {
|
||||||
|
return "-" + string(num), true
|
||||||
|
}
|
||||||
|
return string(num), true
|
||||||
|
}
|
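Not part of the vendored diff: the sketch below, written as a hypothetical package-internal test (parseNumberParts and normalizeToIntString are unexported), works through what the normalization above yields for a few representative JSON number literals. The case values are derived from the shifting logic shown here, not from any file in this commit.

package json

import "testing"

// TestNormalizeToIntStringSketch is illustrative only; the cases are assumptions
// worked out from the code above, not vendored test data.
func TestNormalizeToIntStringSketch(t *testing.T) {
	cases := []struct {
		in   string
		want string
		ok   bool
	}{
		{"1.5e3", "1500", true}, // fraction digits shifted into the integer part, then zero-padded
		{"-2e2", "-200", true},  // sign re-attached after normalization
		{"100e-2", "1", true},   // negative exponent shifts trailing zeros out
		{"1.5", "", false},      // leftover fraction digits: not an integer
	}
	for _, c := range cases {
		parts, ok := parseNumberParts([]byte(c.in))
		if !ok {
			t.Fatalf("parseNumberParts(%q) failed", c.in)
		}
		got, ok := normalizeToIntString(parts)
		if got != c.want || ok != c.ok {
			t.Errorf("normalizeToIntString(%q) = %q, %v; want %q, %v", c.in, got, ok, c.want, c.ok)
		}
	}
}

Returning a plain decimal string lets the Token.Int and Token.Uint accessors added in decode_token.go below delegate the final range check to strconv.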
91 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go generated vendored Normal file
@@ -0,0 +1,91 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"strconv"
	"unicode"
	"unicode/utf16"
	"unicode/utf8"

	"google.golang.org/protobuf/internal/strs"
)

func (d *Decoder) parseString(in []byte) (string, int, error) {
	in0 := in
	if len(in) == 0 {
		return "", 0, ErrUnexpectedEOF
	}
	if in[0] != '"' {
		return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0])
	}
	in = in[1:]
	i := indexNeedEscapeInBytes(in)
	in, out := in[i:], in[:i:i] // set cap to prevent mutations
	for len(in) > 0 {
		switch r, n := utf8.DecodeRune(in); {
		case r == utf8.RuneError && n == 1:
			return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string")
		case r < ' ':
			return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r)
		case r == '"':
			in = in[1:]
			n := len(in0) - len(in)
			return string(out), n, nil
		case r == '\\':
			if len(in) < 2 {
				return "", 0, ErrUnexpectedEOF
			}
			switch r := in[1]; r {
			case '"', '\\', '/':
				in, out = in[2:], append(out, r)
			case 'b':
				in, out = in[2:], append(out, '\b')
			case 'f':
				in, out = in[2:], append(out, '\f')
			case 'n':
				in, out = in[2:], append(out, '\n')
			case 'r':
				in, out = in[2:], append(out, '\r')
			case 't':
				in, out = in[2:], append(out, '\t')
			case 'u':
				if len(in) < 6 {
					return "", 0, ErrUnexpectedEOF
				}
				v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
				if err != nil {
					return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
				}
				in = in[6:]

				r := rune(v)
				if utf16.IsSurrogate(r) {
					if len(in) < 6 {
						return "", 0, ErrUnexpectedEOF
					}
					v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
					r = utf16.DecodeRune(r, rune(v))
					if in[0] != '\\' || in[1] != 'u' ||
						r == unicode.ReplacementChar || err != nil {
						return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
					}
					in = in[6:]
				}
				out = append(out, string(r)...)
			default:
				return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2])
			}
		default:
			i := indexNeedEscapeInBytes(in[n:])
			in, out = in[n+i:], append(out, in[:n+i]...)
		}
	}
	return "", 0, ErrUnexpectedEOF
}

// indexNeedEscapeInBytes returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
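Illustrative only, not part of this commit: a hypothetical package-internal test exercising the escape handling above end to end. It assumes the Decoder constructor (NewDecoder) and the Token.ParsedString accessor from other files of this package that are not shown in this hunk, so treat those entry points as assumptions.

package json

import "testing"

// TestParseStringSketch is illustrative only; NewDecoder and ParsedString are
// assumed from decode.go and decode_token.go, which are outside this hunk.
func TestParseStringSketch(t *testing.T) {
	d := NewDecoder([]byte(`"\u0041 and \uD83D\uDE00"`))
	tok, err := d.Read()
	if err != nil {
		t.Fatal(err)
	}
	// \u0041 decodes to "A"; the surrogate pair \uD83D\uDE00 is recombined by
	// utf16.DecodeRune into the single rune U+1F600.
	if got, want := tok.ParsedString(), "A and \U0001F600"; got != want {
		t.Errorf("ParsedString() = %q, want %q", got, want)
	}
}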
192 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go generated vendored Normal file
@@ -0,0 +1,192 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"bytes"
	"fmt"
	"strconv"
)

// Kind represents a token kind expressible in the JSON format.
type Kind uint16

const (
	Invalid Kind = (1 << iota) / 2
	EOF
	Null
	Bool
	Number
	String
	Name
	ObjectOpen
	ObjectClose
	ArrayOpen
	ArrayClose

	// comma is only for parsing in between tokens and
	// does not need to be exported.
	comma
)

func (k Kind) String() string {
	switch k {
	case EOF:
		return "eof"
	case Null:
		return "null"
	case Bool:
		return "bool"
	case Number:
		return "number"
	case String:
		return "string"
	case ObjectOpen:
		return "{"
	case ObjectClose:
		return "}"
	case Name:
		return "name"
	case ArrayOpen:
		return "["
	case ArrayClose:
		return "]"
	case comma:
		return ","
	}
	return "<invalid>"
}

// Token provides a parsed token kind and value.
//
// Values are provided by the different accessor methods. The accessor methods
// Name, Bool, and ParsedString will panic if called on the wrong kind. There
// are different accessor methods for the Number kind for converting to the
// appropriate Go numeric type and those methods have the ok return value.
type Token struct {
	// Token kind.
	kind Kind
	// pos provides the position of the token in the original input.
	pos int
	// raw bytes of the serialized token.
	// This is a subslice into the original input.
	raw []byte
	// boo is parsed boolean value.
	boo bool
	// str is parsed string value.
	str string
}

// Kind returns the token kind.
func (t Token) Kind() Kind {
	return t.kind
}

// RawString returns the read value in string.
func (t Token) RawString() string {
	return string(t.raw)
}

// Pos returns the token position from the input.
func (t Token) Pos() int {
	return t.pos
}

// Name returns the object name if token is Name, else it panics.
func (t Token) Name() string {
	if t.kind == Name {
		return t.str
	}
	panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
}

// Bool returns the bool value if token kind is Bool, else it panics.
func (t Token) Bool() bool {
	if t.kind == Bool {
		return t.boo
	}
	panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
}

// ParsedString returns the string value for a JSON string token or the read
// value in string if token is not a string.
func (t Token) ParsedString() string {
	if t.kind == String {
		return t.str
	}
	panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
}

// Float returns the floating-point number if token kind is Number.
//
// The floating-point precision is specified by the bitSize parameter: 32 for
// float32 or 64 for float64. If bitSize=32, the result still has type float64,
// but it will be convertible to float32 without changing its value. It will
// return false if the number exceeds the floating point limits for given
// bitSize.
func (t Token) Float(bitSize int) (float64, bool) {
	if t.kind != Number {
		return 0, false
	}
	f, err := strconv.ParseFloat(t.RawString(), bitSize)
	if err != nil {
		return 0, false
	}
	return f, true
}

// Int returns the signed integer number if token is Number.
//
// The given bitSize specifies the integer type that the result must fit into.
// It returns false if the number is not an integer value or if the result
// exceeds the limits for given bitSize.
func (t Token) Int(bitSize int) (int64, bool) {
	s, ok := t.getIntStr()
	if !ok {
		return 0, false
	}
	n, err := strconv.ParseInt(s, 10, bitSize)
	if err != nil {
		return 0, false
	}
	return n, true
}

// Uint returns the unsigned integer number if token is Number.
//
// The given bitSize specifies the unsigned integer type that the result must
// fit into. It returns false if the number is not an unsigned integer value
// or if the result exceeds the limits for given bitSize.
func (t Token) Uint(bitSize int) (uint64, bool) {
	s, ok := t.getIntStr()
	if !ok {
		return 0, false
	}
	n, err := strconv.ParseUint(s, 10, bitSize)
	if err != nil {
		return 0, false
	}
	return n, true
}

func (t Token) getIntStr() (string, bool) {
	if t.kind != Number {
		return "", false
	}
	parts, ok := parseNumberParts(t.raw)
	if !ok {
		return "", false
	}
	return normalizeToIntString(parts)
}

// TokenEquals returns true if given Tokens are equal, else false.
func TokenEquals(x, y Token) bool {
	return x.kind == y.kind &&
		x.pos == y.pos &&
		bytes.Equal(x.raw, y.raw) &&
		x.boo == y.boo &&
		x.str == y.str
}
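Illustrative only, not part of this commit: a hypothetical sketch of the Number accessors defined above. NewDecoder and Read are assumed from decode.go (outside this hunk); the Int path is the one that funnels through getIntStr, parseNumberParts, and normalizeToIntString.

package json

import "testing"

// TestNumberTokenSketch is illustrative only; NewDecoder/Read are assumed from
// decode.go, which is not part of this hunk.
func TestNumberTokenSketch(t *testing.T) {
	d := NewDecoder([]byte(`1.5e3`))
	tok, err := d.Read()
	if err != nil || tok.Kind() != Number {
		t.Fatalf("Read() = %v, %v; want a Number token", tok.Kind(), err)
	}
	// Int normalizes the E-notation first, so it still yields an exact integer.
	if n, ok := tok.Int(64); !ok || n != 1500 {
		t.Errorf("Int(64) = %d, %v; want 1500, true", n, ok)
	}
	// Float simply re-parses the raw token text.
	if f, ok := tok.Float(64); !ok || f != 1500 {
		t.Errorf("Float(64) = %v, %v; want 1500, true", f, ok)
	}
}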
276 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go generated vendored Normal file
@@ -0,0 +1,276 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"math"
	"math/bits"
	"strconv"
	"strings"
	"unicode/utf8"

	"google.golang.org/protobuf/internal/detrand"
	"google.golang.org/protobuf/internal/errors"
)

// kind represents an encoding type.
type kind uint8

const (
	_ kind = (1 << iota) / 2
	name
	scalar
	objectOpen
	objectClose
	arrayOpen
	arrayClose
)

// Encoder provides methods to write out JSON constructs and values. The user is
// responsible for producing valid sequences of JSON constructs and values.
type Encoder struct {
	indent   string
	lastKind kind
	indents  []byte
	out      []byte
}

// NewEncoder returns an Encoder.
//
// If indent is a non-empty string, it causes every entry for an Array or Object
// to be preceded by the indent and trailed by a newline.
func NewEncoder(indent string) (*Encoder, error) {
	e := &Encoder{}
	if len(indent) > 0 {
		if strings.Trim(indent, " \t") != "" {
			return nil, errors.New("indent may only be composed of space or tab characters")
		}
		e.indent = indent
	}
	return e, nil
}

// Bytes returns the content of the written bytes.
func (e *Encoder) Bytes() []byte {
	return e.out
}

// WriteNull writes out the null value.
func (e *Encoder) WriteNull() {
	e.prepareNext(scalar)
	e.out = append(e.out, "null"...)
}

// WriteBool writes out the given boolean value.
func (e *Encoder) WriteBool(b bool) {
	e.prepareNext(scalar)
	if b {
		e.out = append(e.out, "true"...)
	} else {
		e.out = append(e.out, "false"...)
	}
}

// WriteString writes out the given string in JSON string value. Returns error
// if input string contains invalid UTF-8.
func (e *Encoder) WriteString(s string) error {
	e.prepareNext(scalar)
	var err error
	if e.out, err = appendString(e.out, s); err != nil {
		return err
	}
	return nil
}

// Sentinel error used for indicating invalid UTF-8.
var errInvalidUTF8 = errors.New("invalid UTF-8")

func appendString(out []byte, in string) ([]byte, error) {
	out = append(out, '"')
	i := indexNeedEscapeInString(in)
	in, out = in[i:], append(out, in[:i]...)
	for len(in) > 0 {
		switch r, n := utf8.DecodeRuneInString(in); {
		case r == utf8.RuneError && n == 1:
			return out, errInvalidUTF8
		case r < ' ' || r == '"' || r == '\\':
			out = append(out, '\\')
			switch r {
			case '"', '\\':
				out = append(out, byte(r))
			case '\b':
				out = append(out, 'b')
			case '\f':
				out = append(out, 'f')
			case '\n':
				out = append(out, 'n')
			case '\r':
				out = append(out, 'r')
			case '\t':
				out = append(out, 't')
			default:
				out = append(out, 'u')
				out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
				out = strconv.AppendUint(out, uint64(r), 16)
			}
			in = in[n:]
		default:
			i := indexNeedEscapeInString(in[n:])
			in, out = in[n+i:], append(out, in[:n+i]...)
		}
	}
	out = append(out, '"')
	return out, nil
}

// indexNeedEscapeInString returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInString(s string) int {
	for i, r := range s {
		if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError {
			return i
		}
	}
	return len(s)
}

// WriteFloat writes out the given float and bitSize in JSON number value.
func (e *Encoder) WriteFloat(n float64, bitSize int) {
	e.prepareNext(scalar)
	e.out = appendFloat(e.out, n, bitSize)
}

// appendFloat formats given float in bitSize, and appends to the given []byte.
func appendFloat(out []byte, n float64, bitSize int) []byte {
	switch {
	case math.IsNaN(n):
		return append(out, `"NaN"`...)
	case math.IsInf(n, +1):
		return append(out, `"Infinity"`...)
	case math.IsInf(n, -1):
		return append(out, `"-Infinity"`...)
	}

	// JSON number formatting logic based on encoding/json.
	// See floatEncoder.encode for reference.
	fmt := byte('f')
	if abs := math.Abs(n); abs != 0 {
		if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) ||
			bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
			fmt = 'e'
		}
	}
	out = strconv.AppendFloat(out, n, fmt, -1, bitSize)
	if fmt == 'e' {
		n := len(out)
		if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
			out[n-2] = out[n-1]
			out = out[:n-1]
		}
	}
	return out
}

// WriteInt writes out the given signed integer in JSON number value.
func (e *Encoder) WriteInt(n int64) {
	e.prepareNext(scalar)
	e.out = append(e.out, strconv.FormatInt(n, 10)...)
}

// WriteUint writes out the given unsigned integer in JSON number value.
func (e *Encoder) WriteUint(n uint64) {
	e.prepareNext(scalar)
	e.out = append(e.out, strconv.FormatUint(n, 10)...)
}

// StartObject writes out the '{' symbol.
func (e *Encoder) StartObject() {
	e.prepareNext(objectOpen)
	e.out = append(e.out, '{')
}

// EndObject writes out the '}' symbol.
func (e *Encoder) EndObject() {
	e.prepareNext(objectClose)
	e.out = append(e.out, '}')
}

// WriteName writes out the given string in JSON string value and the name
// separator ':'. Returns error if input string contains invalid UTF-8, which
// should not be likely as protobuf field names should be valid.
func (e *Encoder) WriteName(s string) error {
	e.prepareNext(name)
	var err error
	// Append to output regardless of error.
	e.out, err = appendString(e.out, s)
	e.out = append(e.out, ':')
	return err
}

// StartArray writes out the '[' symbol.
func (e *Encoder) StartArray() {
	e.prepareNext(arrayOpen)
	e.out = append(e.out, '[')
}

// EndArray writes out the ']' symbol.
func (e *Encoder) EndArray() {
	e.prepareNext(arrayClose)
	e.out = append(e.out, ']')
}

// prepareNext adds possible comma and indentation for the next value based
// on last type and indent option. It also updates lastKind to next.
func (e *Encoder) prepareNext(next kind) {
	defer func() {
		// Set lastKind to next.
		e.lastKind = next
	}()

	if len(e.indent) == 0 {
		// Need to add comma on the following condition.
		if e.lastKind&(scalar|objectClose|arrayClose) != 0 &&
			next&(name|scalar|objectOpen|arrayOpen) != 0 {
			e.out = append(e.out, ',')
			// For single-line output, add a random extra space after each
			// comma to make output unstable.
			if detrand.Bool() {
				e.out = append(e.out, ' ')
			}
		}
		return
	}

	switch {
	case e.lastKind&(objectOpen|arrayOpen) != 0:
		// If next type is NOT closing, add indent and newline.
		if next&(objectClose|arrayClose) == 0 {
			e.indents = append(e.indents, e.indent...)
			e.out = append(e.out, '\n')
			e.out = append(e.out, e.indents...)
		}

	case e.lastKind&(scalar|objectClose|arrayClose) != 0:
		switch {
		// If next type is either a value or name, add comma and newline.
		case next&(name|scalar|objectOpen|arrayOpen) != 0:
			e.out = append(e.out, ',', '\n')

		// If next type is a closing object or array, adjust indentation.
		case next&(objectClose|arrayClose) != 0:
			e.indents = e.indents[:len(e.indents)-len(e.indent)]
			e.out = append(e.out, '\n')
		}
		e.out = append(e.out, e.indents...)

	case e.lastKind&name != 0:
		e.out = append(e.out, ' ')
		// For multi-line output, add a random extra space after key: to make
		// output unstable.
		if detrand.Bool() {
			e.out = append(e.out, ' ')
		}
	}
}
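Not part of the diff: a minimal sketch of driving the Encoder defined above. Every method used here appears in encode.go; the field names and values are arbitrary, and the caller is responsible for emitting a valid sequence of constructs.

package json

import "testing"

// TestEncoderSketch is illustrative only; it strings together the Encoder
// methods defined above with arbitrary example data.
func TestEncoderSketch(t *testing.T) {
	e, err := NewEncoder("  ") // two spaces of indent; "" would give single-line output
	if err != nil {
		t.Fatal(err)
	}
	e.StartObject()
	if err := e.WriteName("count"); err != nil {
		t.Fatal(err)
	}
	e.WriteInt(3)
	if err := e.WriteName("name"); err != nil {
		t.Fatal(err)
	}
	if err := e.WriteString("containerd"); err != nil {
		t.Fatal(err)
	}
	e.EndObject()

	// prepareNext calls detrand.Bool() to inject an occasional extra space, so
	// output is deliberately not byte-for-byte stable; log rather than compare.
	t.Logf("encoded: %s", e.Bytes())
}

The deliberate instability (the detrand calls in prepareNext) exists so that callers cannot grow a dependency on the exact serialized bytes.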
@@ -381,7 +381,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) {
	case '[':
		return ListOpen, ']'
	}
-	panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh)))
+	panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh))
}

func (d *Decoder) pushOpenStack(ch byte) {
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !go1.13
// +build !go1.13

package errors

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build go1.13
// +build go1.13

package errors

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !protolegacy
// +build !protolegacy

package flags

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build protolegacy
// +build protolegacy

package flags

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !go1.12
// +build !go1.12

package impl

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build go1.12
// +build go1.12

package impl

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build purego || appengine
// +build purego appengine

package impl

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !purego && !appengine
// +build !purego,!appengine

package impl
@@ -18,6 +18,7 @@ import (
)

var errDecode = errors.New("cannot parse invalid wire-format data")
+var errRecursionDepth = errors.New("exceeded maximum recursion depth")

type unmarshalOptions struct {
	flags protoiface.UnmarshalInputFlags

@@ -25,6 +26,7 @@ type unmarshalOptions struct {
		FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
		FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
	}
+	depth int
}

func (o unmarshalOptions) Options() proto.UnmarshalOptions {

@@ -44,6 +46,7 @@ func (o unmarshalOptions) IsDefault() bool {

var lazyUnmarshalOptions = unmarshalOptions{
	resolver: preg.GlobalTypes,
+	depth:    protowire.DefaultRecursionLimit,
}

type unmarshalOutput struct {

@@ -62,6 +65,7 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp
	out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{
		flags:    in.Flags,
		resolver: in.Resolver,
+		depth:    in.Depth,
	})
	var flags piface.UnmarshalOutputFlags
	if out.initialized {

@@ -82,6 +86,10 @@ var errUnknown = errors.New("unknown")

func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
	mi.init()
+	opts.depth--
+	if opts.depth < 0 {
+		return out, errRecursionDepth
+	}
	if flags.ProtoLegacy && mi.isMessageSet {
		return unmarshalMessageSet(mi, b, p, opts)
	}
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build purego || appengine
// +build purego appengine

package impl

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !purego && !appengine
// +build !purego,!appengine

package impl

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build purego || appengine
// +build purego appengine

package strs

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !purego && !appengine
// +build !purego,!appengine

package strs
@@ -52,8 +52,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
	Major = 1
-	Minor = 27
+	Minor = 28
-	Patch = 1
+	Patch = 0
	PreRelease = ""
)
@@ -42,18 +42,25 @@ type UnmarshalOptions struct {
		FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
		FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
	}
+
+	// RecursionLimit limits how deeply messages may be nested.
+	// If zero, a default limit is applied.
+	RecursionLimit int
}

// Unmarshal parses the wire-format message in b and places the result in m.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m Message) error {
-	_, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect())
+	_, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect())
	return err
}

// Unmarshal parses the wire-format message in b and places the result in m.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
+	if o.RecursionLimit == 0 {
+		o.RecursionLimit = protowire.DefaultRecursionLimit
+	}
	_, err := o.unmarshal(b, m.ProtoReflect())
	return err
}

@@ -63,6 +70,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
// This method permits fine-grained control over the unmarshaler.
// Most users should use Unmarshal instead.
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
+	if o.RecursionLimit == 0 {
+		o.RecursionLimit = protowire.DefaultRecursionLimit
+	}
	return o.unmarshal(in.Buf, in.Message)
}

@@ -86,12 +96,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
		Message:  m,
		Buf:      b,
		Resolver: o.Resolver,
+		Depth:    o.RecursionLimit,
	}
	if o.DiscardUnknown {
		in.Flags |= protoiface.UnmarshalDiscardUnknown
	}
	out, err = methods.Unmarshal(in)
} else {
+	o.RecursionLimit--
+	if o.RecursionLimit < 0 {
+		return out, errors.New("exceeded max recursion depth")
+	}
	err = o.unmarshalMessageSlow(b, m)
}
if err != nil {
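Illustrative only, not part of the diff: how a caller of the public proto package might use the RecursionLimit option introduced above. The emptypb message is just a placeholder; any generated message type would do, and a zero limit falls back to protowire.DefaultRecursionLimit as shown in the hunk.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	var m emptypb.Empty
	wire, err := proto.Marshal(&m)
	if err != nil {
		panic(err)
	}

	// A small explicit limit makes maliciously deep-nested input fail fast;
	// leaving RecursionLimit at zero uses the package default instead.
	opts := proto.UnmarshalOptions{RecursionLimit: 16}
	if err := opts.Unmarshal(wire, &m); err != nil {
		fmt.Println("unmarshal failed:", err)
	}
}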
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.

// The protoreflect build tag disables use of fast-path methods.
+//go:build !protoreflect
// +build !protoreflect

package proto

@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.

// The protoreflect build tag disables use of fast-path methods.
+//go:build protoreflect
// +build protoreflect

package proto

@@ -53,6 +53,7 @@ type (
			FindExtensionByName(field FullName) (ExtensionType, error)
			FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error)
		}
+		Depth int
	}
	unmarshalOutput = struct {
		pragma.NoUnkeyedLiterals
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build purego || appengine
// +build purego appengine

package protoreflect

@@ -41,6 +41,31 @@ import (
// Converting to/from a Value and a concrete Go value panics on type mismatch.
// For example, ValueOf("hello").Int() panics because this attempts to
// retrieve an int64 from a string.
+//
+// List, Map, and Message Values are called "composite" values.
+//
+// A composite Value may alias (reference) memory at some location,
+// such that changes to the Value update that location.
+// A composite value acquired with a Mutable method, such as Message.Mutable,
+// always references the source object.
+//
+// For example:
+//	// Append a 0 to a "repeated int32" field.
+//	// Since the Value returned by Mutable is guaranteed to alias
+//	// the source message, modifying the Value modifies the message.
+//	message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0))
+//
+//	// Assign [0] to a "repeated int32" field by creating a new Value,
+//	// modifying it, and assigning it.
+//	list := message.NewField(fieldDesc).(List)
+//	list.Append(protoreflect.ValueOfInt32(0))
+//	message.Set(fieldDesc, list)
+//	// ERROR: Since it is not defined whether Set aliases the source,
+//	// appending to the List here may or may not modify the message.
+//	list.Append(protoreflect.ValueOfInt32(0))
+//
+// Some operations, such as Message.Get, may return an "empty, read-only"
+// composite Value. Modifying an empty, read-only value panics.
type Value value

// The protoreflect API uses a custom Value union type instead of interface{}

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !purego && !appengine
// +build !purego,!appengine

package protoreflect

@@ -103,6 +103,7 @@ type UnmarshalInput = struct {
		FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
		FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
	}
+	Depth int
}

// UnmarshalOutput is output from the Unmarshal method.
@@ -12,7 +12,7 @@ github.com/beorn7/perks/quantile
# github.com/cespare/xxhash/v2 v2.1.2
## explicit; go 1.11
github.com/cespare/xxhash/v2
-# github.com/containerd/containerd v1.6.8
+# github.com/containerd/containerd v1.6.10
## explicit; go 1.17
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log

@@ -107,6 +107,7 @@ github.com/gogo/protobuf/sortkeys
github.com/gogo/protobuf/types
# github.com/golang/protobuf v1.5.2
## explicit; go 1.9
+github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any

@@ -292,10 +293,10 @@ golang.org/x/text/width
# golang.org/x/time v0.1.0
## explicit
golang.org/x/time/rate
-# google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
+# google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
-## explicit; go 1.11
+## explicit; go 1.15
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.45.0
+# google.golang.org/grpc v1.47.0
## explicit; go 1.14
google.golang.org/grpc
google.golang.org/grpc/attributes

@@ -305,6 +306,7 @@ google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials

@@ -314,6 +316,7 @@ google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
google.golang.org/grpc/internal/balancerload
google.golang.org/grpc/internal/binarylog
google.golang.org/grpc/internal/buffer

@@ -325,6 +328,7 @@ google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/resolver
google.golang.org/grpc/internal/resolver/dns
google.golang.org/grpc/internal/resolver/passthrough

@@ -342,14 +346,16 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.27.1
+# google.golang.org/protobuf v1.28.0
-## explicit; go 1.9
+## explicit; go 1.11
+google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
google.golang.org/protobuf/internal/descfmt
google.golang.org/protobuf/internal/descopts
google.golang.org/protobuf/internal/detrand
google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/json
google.golang.org/protobuf/internal/encoding/messageset
google.golang.org/protobuf/internal/encoding/tag
google.golang.org/protobuf/internal/encoding/text