mirror of https://github.com/docker/cli.git

Merge pull request #4633 from thaJeztah/bump_engine

vendor: github.com/docker/docker ed1a61dcb789 (v25.0.0-dev)

This commit is contained in:
commit 814f70749a

vendor.mod (12 lines changed)
@@ -9,11 +9,11 @@ go 1.19
 require (
 	dario.cat/mergo v1.0.0
 	github.com/container-orchestrated-devices/container-device-interface v0.6.1
-	github.com/containerd/containerd v1.7.7
+	github.com/containerd/containerd v1.7.8
 	github.com/creack/pty v1.1.18
 	github.com/distribution/reference v0.5.0
 	github.com/docker/distribution v2.8.3+incompatible
-	github.com/docker/docker v24.0.0-rc.2.0.20231025221548-fc4d035e7a4e+incompatible // master (v25.0.0-dev)
+	github.com/docker/docker v24.0.0-rc.2.0.20231103125139-ed1a61dcb789+incompatible // master (v25.0.0-dev)
 	github.com/docker/docker-credential-helpers v0.8.0
 	github.com/docker/go-connections v0.4.0
 	github.com/docker/go-units v0.5.0
@@ -80,11 +80,11 @@ require (
 	go.opentelemetry.io/otel/metric v0.37.0 // indirect
 	go.opentelemetry.io/otel/trace v1.14.0 // indirect
 	golang.org/x/crypto v0.14.0 // indirect
-	golang.org/x/mod v0.10.0 // indirect
+	golang.org/x/mod v0.11.0 // indirect
 	golang.org/x/net v0.17.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.8.0 // indirect
-	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
-	google.golang.org/grpc v1.56.3 // indirect
+	golang.org/x/tools v0.10.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
+	google.golang.org/grpc v1.58.3 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 )

vendor.sum (26 lines changed)
@@ -38,8 +38,8 @@ github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoC
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA=
 github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA=
-github.com/containerd/containerd v1.7.7 h1:QOC2K4A42RQpcrZyptP6z9EJZnlHfHJUfZrAAHe15q4=
-github.com/containerd/containerd v1.7.7/go.mod h1:3c4XZv6VeT9qgf9GMTxNTMFxGJrGpI2vz1yk4ye+YY8=
+github.com/containerd/containerd v1.7.8 h1:RkwgOW3AVUT3H/dyT0W03Dc8AzlpMG65lX48KftOFSM=
+github.com/containerd/containerd v1.7.8/go.mod h1:L/Hn9qylJtUFT7cPeM0Sr3fATj+WjHwRQ0lyrYk3OPY=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@@ -56,8 +56,8 @@ github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.0-rc.2.0.20231025221548-fc4d035e7a4e+incompatible h1:06ap953LHT+0JPkzYkYPMQHPxaC6NhzTSYaTS2/BeRc=
-github.com/docker/docker v24.0.0-rc.2.0.20231025221548-fc4d035e7a4e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.0-rc.2.0.20231103125139-ed1a61dcb789+incompatible h1:aTS2OQ5LXIzi/Cb7WD6TJUvDglK5PV+9bCbd65FZMLs=
+github.com/docker/docker v24.0.0-rc.2.0.20231103125139-ed1a61dcb789+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
 github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -301,8 +301,8 @@ golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
 golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
+golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -349,17 +349,19 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
-golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
+golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g=
+google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
 google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
@@ -72,8 +72,12 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit
 			//
 			// If there's a JSON parsing error, read the real error message
 			// off the body and send it to the client.
-			_, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit))
-			errC <- errors.New(responseText.String())
+			if errors.As(err, new(*json.SyntaxError)) {
+				_, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit))
+				errC <- errors.New(responseText.String())
+			} else {
+				errC <- err
+			}
 			return
 		}
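The hunk above narrows the fallback: the raw response body is only surfaced as the error text when decoding failed with a JSON syntax error; any other decode failure is forwarded unchanged. Below is a minimal, standalone sketch (not part of the diff; the sample inputs are made up) of how errors.As(err, new(*json.SyntaxError)) draws that distinction.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

// classify decodes one JSON value and reports which error path would be taken.
func classify(r *strings.Reader) string {
	var v struct{ Status string }
	err := json.NewDecoder(r).Decode(&v)
	if err == nil {
		return "decoded"
	}
	if errors.As(err, new(*json.SyntaxError)) {
		return "syntax error: body is not JSON, read it as plain text"
	}
	return "other error: " + err.Error()
}

func main() {
	fmt.Println(classify(strings.NewReader(`{"Status":"ok"}`)))      // decoded
	fmt.Println(classify(strings.NewReader("plain text, not JSON"))) // syntax error path
	fmt.Println(classify(strings.NewReader("")))                     // other error (io.EOF), forwarded as-is
}
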
@@ -481,6 +481,8 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
 	return hdr, nil
 }
 
+const paxSchilyXattr = "SCHILY.xattr."
+
 // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
 // to a tar header
 func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
@@ -493,15 +495,16 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
 	)
 	capability, _ := system.Lgetxattr(path, "security.capability")
 	if capability != nil {
-		length := len(capability)
 		if capability[versionOffset] == vfsCapRevision3 {
 			// Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no
 			// sense outside the user namespace the archive is built in.
 			capability[versionOffset] = vfsCapRevision2
-			length = xattrCapsSz2
+			capability = capability[:xattrCapsSz2]
 		}
-		hdr.Xattrs = make(map[string]string)
-		hdr.Xattrs["security.capability"] = string(capability[:length])
+		if hdr.PAXRecords == nil {
+			hdr.PAXRecords = make(map[string]string)
+		}
+		hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
 	}
 	return nil
 }
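These two hunks switch pkg/archive from the deprecated tar.Header.Xattrs field to tar.Header.PAXRecords, storing extended attributes under the "SCHILY.xattr." PAX prefix. The following standalone sketch (an assumed example: it uses a harmless user.example attribute and placeholder value instead of real capability bytes) shows how such a record round-trips through the standard archive/tar package.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
)

// Same prefix the diff introduces for xattrs carried as PAX records.
const paxSchilyXattr = "SCHILY.xattr."

func main() {
	// Write a header whose xattr travels in PAXRecords rather than in the
	// deprecated Xattrs field.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{
		Name: "example.txt",
		Mode: 0o644,
		PAXRecords: map[string]string{
			paxSchilyXattr + "user.example": "demo", // placeholder value
		},
	}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back: the record is preserved under the same prefixed key.
	tr := tar.NewReader(&buf)
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		for k, v := range h.PAXRecords {
			fmt.Printf("%s = %q\n", k, v)
		}
	}
}
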
@@ -776,8 +779,12 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o
 	}
 
 	var xattrErrs []string
-	for key, value := range hdr.Xattrs {
-		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+	for key, value := range hdr.PAXRecords {
+		xattr, ok := strings.CutPrefix(key, paxSchilyXattr)
+		if !ok {
+			continue
+		}
+		if err := system.Lsetxattr(path, xattr, []byte(value), 0); err != nil {
 			if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) {
 				// EPERM occurs if modifying xattrs is not allowed. This can
 				// happen when running in userns with restrictions (ChromeOS).
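On the extract side, the hunk above iterates over hdr.PAXRecords and uses strings.CutPrefix both to skip records that are not xattrs and to strip the "SCHILY.xattr." prefix before setting the attribute. A tiny standalone sketch of that filtering follows (the record values are invented placeholders; no xattrs are actually written).

package main

import (
	"fmt"
	"strings"
)

const paxSchilyXattr = "SCHILY.xattr."

func main() {
	records := map[string]string{
		"SCHILY.xattr.security.capability": "<binary>",
		"SCHILY.xattr.user.comment":        "hello",
		"mtime":                            "1699000000.0", // not an xattr; skipped
	}
	for key, value := range records {
		xattr, ok := strings.CutPrefix(key, paxSchilyXattr)
		if !ok {
			continue // ignore PAX records that are not xattrs
		}
		fmt.Printf("would set xattr %q = %q\n", xattr, value)
	}
}
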
@@ -41,9 +41,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
 			return nil, err
 		}
 		if len(opaque) == 1 && opaque[0] == 'y' {
-			if hdr.Xattrs != nil {
-				delete(hdr.Xattrs, "trusted.overlay.opaque")
-			}
+			delete(hdr.PAXRecords, paxSchilyXattr+"trusted.overlay.opaque")
 
 			// create a header for the whiteout file
 			// it should inherit some properties from the parent, but be a regular file
@@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
 	// (from "version"). Select appropriate importer.
 	if len(data) > 0 {
 		switch data[0] {
-		case 'i':
+		case 'v', 'c', 'd': // binary, till go1.10
+			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+		case 'i': // indexed, till go1.19
 			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
 			return pkg, err
 
-		case 'v', 'c', 'd':
-			_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
-			return pkg, err
-
-		case 'u':
+		case 'u': // unified, from go1.20
 			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
 			return pkg, err
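The hunk above changes how gcexportdata dispatches on the first byte of the export-data payload: the pre-go1.11 binary formats ('v', 'c', 'd') now return an error instead of being decoded, while 'i' selects the indexed importer and 'u' the unified one. The standalone sketch below mirrors only the shape of that dispatch; classifyExportData and its return values are illustrative, not the real gcimporter entry points.

package main

import (
	"fmt"
)

// classifyExportData shows the first-byte dispatch used by the hunk above.
func classifyExportData(data []byte) (string, error) {
	if len(data) == 0 {
		return "", fmt.Errorf("empty export data")
	}
	switch data[0] {
	case 'v', 'c', 'd': // binary format, emitted until go1.10; now rejected
		return "", fmt.Errorf("binary (%c) import format is no longer supported", data[0])
	case 'i': // indexed format, emitted until go1.19
		return "indexed", nil
	case 'u': // unified format, emitted from go1.20
		return "unified", nil
	default:
		return "", fmt.Errorf("unknown export data format %q", data[0])
	}
}

func main() {
	for _, b := range []byte{'i', 'u', 'c', '?'} {
		kind, err := classifyExportData([]byte{b})
		fmt.Println(kind, err)
	}
}
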
@@ -625,7 +625,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
 		}
 
 		if pkg.PkgPath == "unsafe" {
-			pkg.GoFiles = nil // ignore fake unsafe.go file
+			pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+		} else if len(pkg.CompiledGoFiles) == 0 {
+			// Work around for pre-go.1.11 versions of go list.
+			// TODO(matloob): they should be handled by the fallback.
+			// Can we delete this?
+			pkg.CompiledGoFiles = pkg.GoFiles
 		}
 
 		// Assume go list emits only absolute paths for Dir.
@@ -663,13 +668,6 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
 			response.Roots = append(response.Roots, pkg.ID)
 		}
 
-		// Work around for pre-go.1.11 versions of go list.
-		// TODO(matloob): they should be handled by the fallback.
-		// Can we delete this?
-		if len(pkg.CompiledGoFiles) == 0 {
-			pkg.CompiledGoFiles = pkg.GoFiles
-		}
-
 		// Temporary work-around for golang/go#39986. Parse filenames out of
 		// error messages. This happens if there are unrecoverable syntax
 		// errors in the source, so we can't match on a specific error message.
@@ -891,6 +889,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string {
 		// probably because you'd just get the TestMain.
 		fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),
 	}
+
+	// golang/go#60456: with go1.21 and later, go list serves pgo variants, which
+	// can be costly to compute and may result in redundant processing for the
+	// caller. Disable these variants. If someone wants to add e.g. a NeedPGO
+	// mode flag, that should be a separate proposal.
+	if goVersion >= 21 {
+		fullargs = append(fullargs, "-pgo=off")
+	}
+
 	fullargs = append(fullargs, cfg.BuildFlags...)
 	fullargs = append(fullargs, "--")
 	fullargs = append(fullargs, words...)
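The hunk above appends -pgo=off to the generated go list invocation, but only when the detected toolchain is go1.21 or newer, since older toolchains do not understand the flag. A simplified standalone sketch of that version gate follows; buildGoListArgs and the -e/-json flags are illustrative stand-ins, not the real golistargs.

package main

import (
	"fmt"
)

// buildGoListArgs shows how the -pgo=off flag is only added for toolchains
// that support it (go1.21+). goVersion is the minor version, as in the hunk.
func buildGoListArgs(goVersion int, patterns []string) []string {
	args := []string{"list", "-e", "-json"}
	if goVersion >= 21 {
		// go1.21+ serves PGO variants by default; disable them to avoid
		// redundant work for callers that don't need them (golang/go#60456).
		args = append(args, "-pgo=off")
	}
	args = append(args, "--")
	args = append(args, patterns...)
	return args
}

func main() {
	fmt.Println(buildGoListArgs(20, []string{"./..."})) // [list -e -json -- ./...]
	fmt.Println(buildGoListArgs(21, []string{"./..."})) // [list -e -json -pgo=off -- ./...]
}
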
@@ -308,6 +308,9 @@ type Package struct {
 	TypeErrors []types.Error
 
 	// GoFiles lists the absolute file paths of the package's Go source files.
+	// It may include files that should not be compiled, for example because
+	// they contain non-matching build tags, are documentary pseudo-files such as
+	// unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.
 	GoFiles []string
 
 	// CompiledGoFiles lists the absolute file paths of the package's source
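The added doc comment distinguishes GoFiles (every listed source file) from CompiledGoFiles (what actually reaches the compiler after build-tag filtering and cgo preprocessing). The small sketch below uses the public golang.org/x/tools/go/packages API to print both lists side by side; it assumes the x/tools module is available, and the "net" pattern is just an example.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Request both file lists so the difference described in the new doc
	// comment is visible.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles,
	}
	pkgs, err := packages.Load(cfg, "net")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Printf("%s: %d GoFiles, %d CompiledGoFiles\n",
			p.PkgPath, len(p.GoFiles), len(p.CompiledGoFiles))
	}
}
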
@@ -1,762 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package objectpath defines a naming scheme for types.Objects
|
|
||||||
// (that is, named entities in Go programs) relative to their enclosing
|
|
||||||
// package.
|
|
||||||
//
|
|
||||||
// Type-checker objects are canonical, so they are usually identified by
|
|
||||||
// their address in memory (a pointer), but a pointer has meaning only
|
|
||||||
// within one address space. By contrast, objectpath names allow the
|
|
||||||
// identity of an object to be sent from one program to another,
|
|
||||||
// establishing a correspondence between types.Object variables that are
|
|
||||||
// distinct but logically equivalent.
|
|
||||||
//
|
|
||||||
// A single object may have multiple paths. In this example,
|
|
||||||
//
|
|
||||||
// type A struct{ X int }
|
|
||||||
// type B A
|
|
||||||
//
|
|
||||||
// the field X has two paths due to its membership of both A and B.
|
|
||||||
// The For(obj) function always returns one of these paths, arbitrarily
|
|
||||||
// but consistently.
|
|
||||||
package objectpath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/types"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/internal/typeparams"
|
|
||||||
|
|
||||||
_ "unsafe" // for go:linkname
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Path is an opaque name that identifies a types.Object
|
|
||||||
// relative to its package. Conceptually, the name consists of a
|
|
||||||
// sequence of destructuring operations applied to the package scope
|
|
||||||
// to obtain the original object.
|
|
||||||
// The name does not include the package itself.
|
|
||||||
type Path string
|
|
||||||
|
|
||||||
// Encoding
|
|
||||||
//
|
|
||||||
// An object path is a textual and (with training) human-readable encoding
|
|
||||||
// of a sequence of destructuring operators, starting from a types.Package.
|
|
||||||
// The sequences represent a path through the package/object/type graph.
|
|
||||||
// We classify these operators by their type:
|
|
||||||
//
|
|
||||||
// PO package->object Package.Scope.Lookup
|
|
||||||
// OT object->type Object.Type
|
|
||||||
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
|
|
||||||
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
|
|
||||||
//
|
|
||||||
// All valid paths start with a package and end at an object
|
|
||||||
// and thus may be defined by the regular language:
|
|
||||||
//
|
|
||||||
// objectpath = PO (OT TT* TO)*
|
|
||||||
//
|
|
||||||
// The concrete encoding follows directly:
|
|
||||||
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
|
|
||||||
// - The only OT operator is Object.Type,
|
|
||||||
// which we encode as '.' because dot cannot appear in an identifier.
|
|
||||||
// - The TT operators are encoded as [EKPRUTC];
|
|
||||||
// one of these (TypeParam) requires an integer operand,
|
|
||||||
// which is encoded as a string of decimal digits.
|
|
||||||
// - The TO operators are encoded as [AFMO];
|
|
||||||
// three of these (At,Field,Method) require an integer operand,
|
|
||||||
// which is encoded as a string of decimal digits.
|
|
||||||
// These indices are stable across different representations
|
|
||||||
// of the same package, even source and export data.
|
|
||||||
// The indices used are implementation specific and may not correspond to
|
|
||||||
// the argument to the go/types function.
|
|
||||||
//
|
|
||||||
// In the example below,
|
|
||||||
//
|
|
||||||
// package p
|
|
||||||
//
|
|
||||||
// type T interface {
|
|
||||||
// f() (a string, b struct{ X int })
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// field X has the path "T.UM0.RA1.F0",
|
|
||||||
// representing the following sequence of operations:
|
|
||||||
//
|
|
||||||
// p.Lookup("T") T
|
|
||||||
// .Type().Underlying().Method(0). f
|
|
||||||
// .Type().Results().At(1) b
|
|
||||||
// .Type().Field(0) X
|
|
||||||
//
|
|
||||||
// The encoding is not maximally compact---every R or P is
|
|
||||||
// followed by an A, for example---but this simplifies the
|
|
||||||
// encoder and decoder.
|
|
||||||
const (
|
|
||||||
// object->type operators
|
|
||||||
opType = '.' // .Type() (Object)
|
|
||||||
|
|
||||||
// type->type operators
|
|
||||||
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
|
|
||||||
opKey = 'K' // .Key() (Map)
|
|
||||||
opParams = 'P' // .Params() (Signature)
|
|
||||||
opResults = 'R' // .Results() (Signature)
|
|
||||||
opUnderlying = 'U' // .Underlying() (Named)
|
|
||||||
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
|
|
||||||
opConstraint = 'C' // .Constraint() (TypeParam)
|
|
||||||
|
|
||||||
// type->object operators
|
|
||||||
opAt = 'A' // .At(i) (Tuple)
|
|
||||||
opField = 'F' // .Field(i) (Struct)
|
|
||||||
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
|
|
||||||
opObj = 'O' // .Obj() (Named, TypeParam)
|
|
||||||
)
|
|
||||||
|
|
||||||
// For returns the path to an object relative to its package,
|
|
||||||
// or an error if the object is not accessible from the package's Scope.
|
|
||||||
//
|
|
||||||
// The For function guarantees to return a path only for the following objects:
|
|
||||||
// - package-level types
|
|
||||||
// - exported package-level non-types
|
|
||||||
// - methods
|
|
||||||
// - parameter and result variables
|
|
||||||
// - struct fields
|
|
||||||
// These objects are sufficient to define the API of their package.
|
|
||||||
// The objects described by a package's export data are drawn from this set.
|
|
||||||
//
|
|
||||||
// For does not return a path for predeclared names, imported package
|
|
||||||
// names, local names, and unexported package-level names (except
|
|
||||||
// types).
|
|
||||||
//
|
|
||||||
// Example: given this definition,
|
|
||||||
//
|
|
||||||
// package p
|
|
||||||
//
|
|
||||||
// type T interface {
|
|
||||||
// f() (a string, b struct{ X int })
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// For(X) would return a path that denotes the following sequence of operations:
|
|
||||||
//
|
|
||||||
// p.Scope().Lookup("T") (TypeName T)
|
|
||||||
// .Type().Underlying().Method(0). (method Func f)
|
|
||||||
// .Type().Results().At(1) (field Var b)
|
|
||||||
// .Type().Field(0) (field Var X)
|
|
||||||
//
|
|
||||||
// where p is the package (*types.Package) to which X belongs.
|
|
||||||
func For(obj types.Object) (Path, error) {
|
|
||||||
return newEncoderFor()(obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
// An encoder amortizes the cost of encoding the paths of multiple objects.
|
|
||||||
// Nonexported pending approval of proposal 58668.
|
|
||||||
type encoder struct {
|
|
||||||
scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names()
|
|
||||||
namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exposed to gopls via golang.org/x/tools/internal/typesinternal
|
|
||||||
// pending approval of proposal 58668.
|
|
||||||
//
|
|
||||||
//go:linkname newEncoderFor
|
|
||||||
func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For }
|
|
||||||
|
|
||||||
func (enc *encoder) For(obj types.Object) (Path, error) {
|
|
||||||
pkg := obj.Pkg()
|
|
||||||
|
|
||||||
// This table lists the cases of interest.
|
|
||||||
//
|
|
||||||
// Object Action
|
|
||||||
// ------ ------
|
|
||||||
// nil reject
|
|
||||||
// builtin reject
|
|
||||||
// pkgname reject
|
|
||||||
// label reject
|
|
||||||
// var
|
|
||||||
// package-level accept
|
|
||||||
// func param/result accept
|
|
||||||
// local reject
|
|
||||||
// struct field accept
|
|
||||||
// const
|
|
||||||
// package-level accept
|
|
||||||
// local reject
|
|
||||||
// func
|
|
||||||
// package-level accept
|
|
||||||
// init functions reject
|
|
||||||
// concrete method accept
|
|
||||||
// interface method accept
|
|
||||||
// type
|
|
||||||
// package-level accept
|
|
||||||
// local reject
|
|
||||||
//
|
|
||||||
// The only accessible package-level objects are members of pkg itself.
|
|
||||||
//
|
|
||||||
// The cases are handled in four steps:
|
|
||||||
//
|
|
||||||
// 1. reject nil and builtin
|
|
||||||
// 2. accept package-level objects
|
|
||||||
// 3. reject obviously invalid objects
|
|
||||||
// 4. search the API for the path to the param/result/field/method.
|
|
||||||
|
|
||||||
// 1. reference to nil or builtin?
|
|
||||||
if pkg == nil {
|
|
||||||
return "", fmt.Errorf("predeclared %s has no path", obj)
|
|
||||||
}
|
|
||||||
scope := pkg.Scope()
|
|
||||||
|
|
||||||
// 2. package-level object?
|
|
||||||
if scope.Lookup(obj.Name()) == obj {
|
|
||||||
// Only exported objects (and non-exported types) have a path.
|
|
||||||
// Non-exported types may be referenced by other objects.
|
|
||||||
if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
|
|
||||||
return "", fmt.Errorf("no path for non-exported %v", obj)
|
|
||||||
}
|
|
||||||
return Path(obj.Name()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3. Not a package-level object.
|
|
||||||
// Reject obviously non-viable cases.
|
|
||||||
switch obj := obj.(type) {
|
|
||||||
case *types.TypeName:
|
|
||||||
if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
|
|
||||||
// With the exception of type parameters, only package-level type names
|
|
||||||
// have a path.
|
|
||||||
return "", fmt.Errorf("no path for %v", obj)
|
|
||||||
}
|
|
||||||
case *types.Const, // Only package-level constants have a path.
|
|
||||||
*types.Label, // Labels are function-local.
|
|
||||||
*types.PkgName: // PkgNames are file-local.
|
|
||||||
return "", fmt.Errorf("no path for %v", obj)
|
|
||||||
|
|
||||||
case *types.Var:
|
|
||||||
// Could be:
|
|
||||||
// - a field (obj.IsField())
|
|
||||||
// - a func parameter or result
|
|
||||||
// - a local var.
|
|
||||||
// Sadly there is no way to distinguish
|
|
||||||
// a param/result from a local
|
|
||||||
// so we must proceed to the find.
|
|
||||||
|
|
||||||
case *types.Func:
|
|
||||||
// A func, if not package-level, must be a method.
|
|
||||||
if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
|
|
||||||
return "", fmt.Errorf("func is not a method: %v", obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
if path, ok := enc.concreteMethod(obj); ok {
|
|
||||||
// Fast path for concrete methods that avoids looping over scope.
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic(obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Search the API for the path to the var (field/param/result) or method.
|
|
||||||
|
|
||||||
// First inspect package-level named types.
|
|
||||||
// In the presence of path aliases, these give
|
|
||||||
// the best paths because non-types may
|
|
||||||
// refer to types, but not the reverse.
|
|
||||||
empty := make([]byte, 0, 48) // initial space
|
|
||||||
names := enc.scopeNames(scope)
|
|
||||||
for _, name := range names {
|
|
||||||
o := scope.Lookup(name)
|
|
||||||
tname, ok := o.(*types.TypeName)
|
|
||||||
if !ok {
|
|
||||||
continue // handle non-types in second pass
|
|
||||||
}
|
|
||||||
|
|
||||||
path := append(empty, name...)
|
|
||||||
path = append(path, opType)
|
|
||||||
|
|
||||||
T := o.Type()
|
|
||||||
|
|
||||||
if tname.IsAlias() {
|
|
||||||
// type alias
|
|
||||||
if r := find(obj, T, path, nil); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if named, _ := T.(*types.Named); named != nil {
|
|
||||||
if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil {
|
|
||||||
// generic named type
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// defined (named) type
|
|
||||||
if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then inspect everything else:
|
|
||||||
// non-types, and declared methods of defined types.
|
|
||||||
for _, name := range names {
|
|
||||||
o := scope.Lookup(name)
|
|
||||||
path := append(empty, name...)
|
|
||||||
if _, ok := o.(*types.TypeName); !ok {
|
|
||||||
if o.Exported() {
|
|
||||||
// exported non-type (const, var, func)
|
|
||||||
if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inspect declared methods of defined types.
|
|
||||||
if T, ok := o.Type().(*types.Named); ok {
|
|
||||||
path = append(path, opType)
|
|
||||||
// Note that method index here is always with respect
|
|
||||||
// to canonical ordering of methods, regardless of how
|
|
||||||
// they appear in the underlying type.
|
|
||||||
for i, m := range enc.namedMethods(T) {
|
|
||||||
path2 := appendOpArg(path, opMethod, i)
|
|
||||||
if m == obj {
|
|
||||||
return Path(path2), nil // found declared method
|
|
||||||
}
|
|
||||||
if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendOpArg(path []byte, op byte, arg int) []byte {
|
|
||||||
path = append(path, op)
|
|
||||||
path = strconv.AppendInt(path, int64(arg), 10)
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
|
|
||||||
// concreteMethod returns the path for meth, which must have a non-nil receiver.
|
|
||||||
// The second return value indicates success and may be false if the method is
|
|
||||||
// an interface method or if it is an instantiated method.
|
|
||||||
//
|
|
||||||
// This function is just an optimization that avoids the general scope walking
|
|
||||||
// approach. You are expected to fall back to the general approach if this
|
|
||||||
// function fails.
|
|
||||||
func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) {
|
|
||||||
// Concrete methods can only be declared on package-scoped named types. For
|
|
||||||
// that reason we can skip the expensive walk over the package scope: the
|
|
||||||
// path will always be package -> named type -> method. We can trivially get
|
|
||||||
// the type name from the receiver, and only have to look over the type's
|
|
||||||
// methods to find the method index.
|
|
||||||
//
|
|
||||||
// Methods on generic types require special consideration, however. Consider
|
|
||||||
// the following package:
|
|
||||||
//
|
|
||||||
// L1: type S[T any] struct{}
|
|
||||||
// L2: func (recv S[A]) Foo() { recv.Bar() }
|
|
||||||
// L3: func (recv S[B]) Bar() { }
|
|
||||||
// L4: type Alias = S[int]
|
|
||||||
// L5: func _[T any]() { var s S[int]; s.Foo() }
|
|
||||||
//
|
|
||||||
// The receivers of methods on generic types are instantiations. L2 and L3
|
|
||||||
// instantiate S with the type-parameters A and B, which are scoped to the
|
|
||||||
// respective methods. L4 and L5 each instantiate S with int. Each of these
|
|
||||||
// instantiations has its own method set, full of methods (and thus objects)
|
|
||||||
// with receivers whose types are the respective instantiations. In other
|
|
||||||
// words, we have
|
|
||||||
//
|
|
||||||
// S[A].Foo, S[A].Bar
|
|
||||||
// S[B].Foo, S[B].Bar
|
|
||||||
// S[int].Foo, S[int].Bar
|
|
||||||
//
|
|
||||||
// We may thus be trying to produce object paths for any of these objects.
|
|
||||||
//
|
|
||||||
// S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
|
|
||||||
// and S.Bar, which are the paths that this function naturally produces.
|
|
||||||
//
|
|
||||||
// S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
|
|
||||||
// don't correspond to the origin methods. For S[int], this is significant.
|
|
||||||
// The most precise object path for S[int].Foo, for example, is Alias.Foo,
|
|
||||||
// not S.Foo. Our function, however, would produce S.Foo, which would
|
|
||||||
// resolve to a different object.
|
|
||||||
//
|
|
||||||
// For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
|
|
||||||
// still the correct paths, since only the origin methods have meaningful
|
|
||||||
// paths. But this is likely only true for trivial cases and has edge cases.
|
|
||||||
// Since this function is only an optimization, we err on the side of giving
|
|
||||||
// up, deferring to the slower but definitely correct algorithm. Most users
|
|
||||||
// of objectpath will only be giving us origin methods, anyway, as referring
|
|
||||||
// to instantiated methods is usually not useful.
|
|
||||||
|
|
||||||
if typeparams.OriginMethod(meth) != meth {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
recvT := meth.Type().(*types.Signature).Recv().Type()
|
|
||||||
if ptr, ok := recvT.(*types.Pointer); ok {
|
|
||||||
recvT = ptr.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
named, ok := recvT.(*types.Named)
|
|
||||||
if !ok {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
if types.IsInterface(named) {
|
|
||||||
// Named interfaces don't have to be package-scoped
|
|
||||||
//
|
|
||||||
// TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
|
|
||||||
// methods, too, I think.
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preallocate space for the name, opType, opMethod, and some digits.
|
|
||||||
name := named.Obj().Name()
|
|
||||||
path := make([]byte, 0, len(name)+8)
|
|
||||||
path = append(path, name...)
|
|
||||||
path = append(path, opType)
|
|
||||||
for i, m := range enc.namedMethods(named) {
|
|
||||||
if m == meth {
|
|
||||||
path = appendOpArg(path, opMethod, i)
|
|
||||||
return Path(path), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named))
|
|
||||||
}
|
|
||||||
|
|
||||||
// find finds obj within type T, returning the path to it, or nil if not found.
|
|
||||||
//
|
|
||||||
// The seen map is used to short circuit cycles through type parameters. If
|
|
||||||
// nil, it will be allocated as necessary.
|
|
||||||
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
|
|
||||||
switch T := T.(type) {
|
|
||||||
case *types.Basic, *types.Named:
|
|
||||||
// Named types belonging to pkg were handled already,
|
|
||||||
// so T must belong to another package. No path.
|
|
||||||
return nil
|
|
||||||
case *types.Pointer:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
|
||||||
case *types.Slice:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
|
||||||
case *types.Array:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
|
||||||
case *types.Chan:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
|
||||||
case *types.Map:
|
|
||||||
if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
|
||||||
case *types.Signature:
|
|
||||||
if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
return find(obj, T.Results(), append(path, opResults), seen)
|
|
||||||
case *types.Struct:
|
|
||||||
for i := 0; i < T.NumFields(); i++ {
|
|
||||||
fld := T.Field(i)
|
|
||||||
path2 := appendOpArg(path, opField, i)
|
|
||||||
if fld == obj {
|
|
||||||
return path2 // found field var
|
|
||||||
}
|
|
||||||
if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case *types.Tuple:
|
|
||||||
for i := 0; i < T.Len(); i++ {
|
|
||||||
v := T.At(i)
|
|
||||||
path2 := appendOpArg(path, opAt, i)
|
|
||||||
if v == obj {
|
|
||||||
return path2 // found param/result var
|
|
||||||
}
|
|
||||||
if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case *types.Interface:
|
|
||||||
for i := 0; i < T.NumMethods(); i++ {
|
|
||||||
m := T.Method(i)
|
|
||||||
path2 := appendOpArg(path, opMethod, i)
|
|
||||||
if m == obj {
|
|
||||||
return path2 // found interface method
|
|
||||||
}
|
|
||||||
if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case *typeparams.TypeParam:
|
|
||||||
name := T.Obj()
|
|
||||||
if name == obj {
|
|
||||||
return append(path, opObj)
|
|
||||||
}
|
|
||||||
if seen[name] {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if seen == nil {
|
|
||||||
seen = make(map[*types.TypeName]bool)
|
|
||||||
}
|
|
||||||
seen[name] = true
|
|
||||||
if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
panic(T)
|
|
||||||
}
|
|
||||||
|
|
||||||
func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
|
|
||||||
for i := 0; i < list.Len(); i++ {
|
|
||||||
tparam := list.At(i)
|
|
||||||
path2 := appendOpArg(path, opTypeParam, i)
|
|
||||||
if r := find(obj, tparam, path2, seen); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object returns the object denoted by path p within the package pkg.
|
|
||||||
func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|
||||||
if p == "" {
|
|
||||||
return nil, fmt.Errorf("empty path")
|
|
||||||
}
|
|
||||||
|
|
||||||
pathstr := string(p)
|
|
||||||
var pkgobj, suffix string
|
|
||||||
if dot := strings.IndexByte(pathstr, opType); dot < 0 {
|
|
||||||
pkgobj = pathstr
|
|
||||||
} else {
|
|
||||||
pkgobj = pathstr[:dot]
|
|
||||||
suffix = pathstr[dot:] // suffix starts with "."
|
|
||||||
}
|
|
||||||
|
|
||||||
obj := pkg.Scope().Lookup(pkgobj)
|
|
||||||
if obj == nil {
|
|
||||||
return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
|
|
||||||
}
|
|
||||||
|
|
||||||
// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
|
|
||||||
type hasElem interface {
|
|
||||||
Elem() types.Type
|
|
||||||
}
|
|
||||||
// abstraction of *types.{Named,Signature}
|
|
||||||
type hasTypeParams interface {
|
|
||||||
TypeParams() *typeparams.TypeParamList
|
|
||||||
}
|
|
||||||
// abstraction of *types.{Named,TypeParam}
|
|
||||||
type hasObj interface {
|
|
||||||
Obj() *types.TypeName
|
|
||||||
}
|
|
||||||
|
|
||||||
// The loop state is the pair (t, obj),
|
|
||||||
// exactly one of which is non-nil, initially obj.
|
|
||||||
// All suffixes start with '.' (the only object->type operation),
|
|
||||||
// followed by optional type->type operations,
|
|
||||||
// then a type->object operation.
|
|
||||||
// The cycle then repeats.
|
|
||||||
var t types.Type
|
|
||||||
for suffix != "" {
|
|
||||||
code := suffix[0]
|
|
||||||
suffix = suffix[1:]
|
|
||||||
|
|
||||||
// Codes [AFM] have an integer operand.
|
|
||||||
var index int
|
|
||||||
switch code {
|
|
||||||
case opAt, opField, opMethod, opTypeParam:
|
|
||||||
rest := strings.TrimLeft(suffix, "0123456789")
|
|
||||||
numerals := suffix[:len(suffix)-len(rest)]
|
|
||||||
suffix = rest
|
|
||||||
i, err := strconv.Atoi(numerals)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
|
|
||||||
}
|
|
||||||
index = int(i)
|
|
||||||
case opObj:
|
|
||||||
// no operand
|
|
||||||
default:
|
|
||||||
// The suffix must end with a type->object operation.
|
|
||||||
if suffix == "" {
|
|
||||||
return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if code == opType {
|
|
||||||
if t != nil {
|
|
||||||
return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
|
|
||||||
}
|
|
||||||
t = obj.Type()
|
|
||||||
obj = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if t == nil {
|
|
||||||
return nil, fmt.Errorf("invalid path: code %q in object context", code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inv: t != nil, obj == nil
|
|
||||||
|
|
||||||
switch code {
|
|
||||||
case opElem:
|
|
||||||
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
|
|
||||||
}
|
|
||||||
t = hasElem.Elem()
|
|
||||||
|
|
||||||
case opKey:
|
|
||||||
mapType, ok := t.(*types.Map)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
|
|
||||||
}
|
|
||||||
t = mapType.Key()
|
|
||||||
|
|
||||||
case opParams:
|
|
||||||
sig, ok := t.(*types.Signature)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
|
||||||
}
|
|
||||||
t = sig.Params()
|
|
||||||
|
|
||||||
case opResults:
|
|
||||||
sig, ok := t.(*types.Signature)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
|
||||||
}
|
|
||||||
t = sig.Results()
|
|
||||||
|
|
||||||
case opUnderlying:
|
|
||||||
named, ok := t.(*types.Named)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
|
|
||||||
}
|
|
||||||
t = named.Underlying()
|
|
||||||
|
|
||||||
case opTypeParam:
|
|
||||||
hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
|
|
||||||
}
|
|
||||||
tparams := hasTypeParams.TypeParams()
|
|
||||||
if n := tparams.Len(); index >= n {
|
|
||||||
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
|
|
||||||
}
|
|
||||||
t = tparams.At(index)
|
|
||||||
|
|
||||||
case opConstraint:
|
|
||||||
tparam, ok := t.(*typeparams.TypeParam)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
|
|
||||||
}
|
|
||||||
t = tparam.Constraint()
|
|
||||||
|
|
||||||
case opAt:
|
|
||||||
tuple, ok := t.(*types.Tuple)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
|
|
||||||
}
|
|
||||||
if n := tuple.Len(); index >= n {
|
|
||||||
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
|
|
||||||
}
|
|
||||||
obj = tuple.At(index)
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
case opField:
|
|
||||||
structType, ok := t.(*types.Struct)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
|
|
||||||
}
|
|
||||||
if n := structType.NumFields(); index >= n {
|
|
||||||
return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
|
|
||||||
}
|
|
||||||
obj = structType.Field(index)
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
case opMethod:
|
|
||||||
switch t := t.(type) {
|
|
||||||
case *types.Interface:
|
|
||||||
if index >= t.NumMethods() {
|
|
||||||
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
|
|
||||||
}
|
|
||||||
obj = t.Method(index) // Id-ordered
|
|
||||||
|
|
||||||
case *types.Named:
|
|
||||||
methods := namedMethods(t) // (unmemoized)
|
|
||||||
if index >= len(methods) {
|
|
||||||
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods))
|
|
||||||
}
|
|
||||||
obj = methods[index] // Id-ordered
|
|
||||||
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
|
|
||||||
}
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
case opObj:
|
|
||||||
hasObj, ok := t.(hasObj)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
|
|
||||||
}
|
|
||||||
obj = hasObj.Obj()
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("invalid path: unknown code %q", code)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.Pkg() != pkg {
|
|
||||||
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj, nil // success
|
|
||||||
}
|
|
||||||
|
|
||||||
// namedMethods returns the methods of a Named type in ascending Id order.
|
|
||||||
func namedMethods(named *types.Named) []*types.Func {
|
|
||||||
methods := make([]*types.Func, named.NumMethods())
|
|
||||||
for i := range methods {
|
|
||||||
methods[i] = named.Method(i)
|
|
||||||
}
|
|
||||||
sort.Slice(methods, func(i, j int) bool {
|
|
||||||
return methods[i].Id() < methods[j].Id()
|
|
||||||
})
|
|
||||||
return methods
|
|
||||||
}
|
|
||||||
|
|
||||||
// scopeNames is a memoization of scope.Names. Callers must not modify the result.
|
|
||||||
func (enc *encoder) scopeNames(scope *types.Scope) []string {
|
|
||||||
m := enc.scopeNamesMemo
|
|
||||||
if m == nil {
|
|
||||||
m = make(map[*types.Scope][]string)
|
|
||||||
enc.scopeNamesMemo = m
|
|
||||||
}
|
|
||||||
names, ok := m[scope]
|
|
||||||
if !ok {
|
|
||||||
names = scope.Names() // allocates and sorts
|
|
||||||
m[scope] = names
|
|
||||||
}
|
|
||||||
return names
|
|
||||||
}
|
|
||||||
|
|
||||||
// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
|
|
||||||
func (enc *encoder) namedMethods(named *types.Named) []*types.Func {
|
|
||||||
m := enc.namedMethodsMemo
|
|
||||||
if m == nil {
|
|
||||||
m = make(map[*types.Named][]*types.Func)
|
|
||||||
enc.namedMethodsMemo = m
|
|
||||||
}
|
|
||||||
methods, ok := m[named]
|
|
||||||
if !ok {
|
|
||||||
methods = namedMethods(named) // allocates and sorts
|
|
||||||
m[named] = methods
|
|
||||||
}
|
|
||||||
return methods
|
|
||||||
|
|
||||||
}
|
|
|
@@ -0,0 +1,59 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag provides the labels used for telemetry throughout gopls.
+package tag
+
+import (
+	"golang.org/x/tools/internal/event/keys"
+)
+
+var (
+	// create the label keys we use
+	Method        = keys.NewString("method", "")
+	StatusCode    = keys.NewString("status.code", "")
+	StatusMessage = keys.NewString("status.message", "")
+	RPCID         = keys.NewString("id", "")
+	RPCDirection  = keys.NewString("direction", "")
+	File          = keys.NewString("file", "")
+	Directory     = keys.New("directory", "")
+	URI           = keys.New("URI", "")
+	Package       = keys.NewString("package", "") // Package ID
+	PackagePath   = keys.NewString("package_path", "")
+	Query         = keys.New("query", "")
+	Snapshot      = keys.NewUInt64("snapshot", "")
+	Operation     = keys.NewString("operation", "")
+
+	Position     = keys.New("position", "")
+	Category     = keys.NewString("category", "")
+	PackageCount = keys.NewInt("packages", "")
+	Files        = keys.New("files", "")
+	Port         = keys.NewInt("port", "")
+	Type         = keys.New("type", "")
+	HoverKind    = keys.NewString("hoverkind", "")
+
+	NewServer = keys.NewString("new_server", "A new server was added")
+	EndServer = keys.NewString("end_server", "A server was shut down")
+
+	ServerID     = keys.NewString("server", "The server ID an event is related to")
+	Logfile      = keys.NewString("logfile", "")
+	DebugAddress = keys.NewString("debug_address", "")
+	GoplsPath    = keys.NewString("gopls_path", "")
+	ClientID     = keys.NewString("client_id", "")
+
+	Level = keys.NewInt("level", "The logging level")
+)
+
+var (
+	// create the stats we measure
+	Started       = keys.NewInt64("started", "Count of started RPCs.")
+	ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.")            //, unit.Bytes)
+	SentBytes     = keys.NewInt64("sent_bytes", "Bytes sent.")                    //, unit.Bytes)
+	Latency       = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
+)
+
+const (
+	Inbound  = "in"
+	Outbound = "out"
+)
@@ -1,852 +0,0 @@
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Binary package export.
|
|
||||||
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
|
|
||||||
// see that file for specification of the format.
|
|
||||||
|
|
||||||
package gcimporter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"go/constant"
|
|
||||||
"go/token"
|
|
||||||
"go/types"
|
|
||||||
"math"
|
|
||||||
"math/big"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// If debugFormat is set, each integer and string value is preceded by a marker
|
|
||||||
// and position information in the encoding. This mechanism permits an importer
|
|
||||||
// to recognize immediately when it is out of sync. The importer recognizes this
|
|
||||||
// mode automatically (i.e., it can import export data produced with debugging
|
|
||||||
// support even if debugFormat is not set at the time of import). This mode will
|
|
||||||
// lead to massively larger export data (by a factor of 2 to 3) and should only
|
|
||||||
// be enabled during development and debugging.
|
|
||||||
//
|
|
||||||
// NOTE: This flag is the first flag to enable if importing dies because of
|
|
||||||
// (suspected) format errors, and whenever a change is made to the format.
|
|
||||||
const debugFormat = false // default: false
|
|
||||||
|
|
||||||
// Current export format version. Increase with each format change.
|
|
||||||
//
|
|
||||||
// Note: The latest binary (non-indexed) export format is at version 6.
|
|
||||||
// This exporter is still at level 4, but it doesn't matter since
|
|
||||||
// the binary importer can handle older versions just fine.
|
|
||||||
//
|
|
||||||
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
|
|
||||||
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
|
|
||||||
// 4: type name objects support type aliases, uses aliasTag
|
|
||||||
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
|
|
||||||
// 2: removed unused bool in ODCL export (compiler only)
|
|
||||||
// 1: header format change (more regular), export package for _ struct fields
|
|
||||||
// 0: Go1.7 encoding
|
|
||||||
const exportVersion = 4
|
|
||||||
|
|
||||||
// trackAllTypes enables cycle tracking for all types, not just named
|
|
||||||
// types. The existing compiler invariants assume that unnamed types
|
|
||||||
// that are not completely set up are not used, or else there are spurious
|
|
||||||
// errors.
|
|
||||||
// If disabled, only named types are tracked, possibly leading to slightly
|
|
||||||
// less efficient encoding in rare cases. It also prevents the export of
|
|
||||||
// some corner-case type declarations (but those are not handled correctly
|
|
||||||
// with the textual export format either).
|
|
||||||
// TODO(gri) enable and remove once issues caused by it are fixed
|
|
||||||
const trackAllTypes = false
|
|
||||||
|
|
||||||
type exporter struct {
|
|
||||||
fset *token.FileSet
|
|
||||||
out bytes.Buffer
|
|
||||||
|
|
||||||
// object -> index maps, indexed in order of serialization
|
|
||||||
strIndex map[string]int
|
|
||||||
pkgIndex map[*types.Package]int
|
|
||||||
typIndex map[types.Type]int
|
|
||||||
|
|
||||||
// position encoding
|
|
||||||
posInfoFormat bool
|
|
||||||
prevFile string
|
|
||||||
prevLine int
|
|
||||||
|
|
||||||
// debugging support
|
|
||||||
written int // bytes written
|
|
||||||
indent int // for trace
|
|
||||||
}
|
|
||||||
|
|
||||||
// internalError represents an error generated inside this package.
|
|
||||||
type internalError string
|
|
||||||
|
|
||||||
func (e internalError) Error() string { return "gcimporter: " + string(e) }
|
|
||||||
|
|
||||||
func internalErrorf(format string, args ...interface{}) error {
|
|
||||||
return internalError(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// BExportData returns binary export data for pkg.
|
|
||||||
// If no file set is provided, position info will be missing.
|
|
||||||
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
|
||||||
if !debug {
|
|
||||||
defer func() {
|
|
||||||
if e := recover(); e != nil {
|
|
||||||
if ierr, ok := e.(internalError); ok {
|
|
||||||
err = ierr
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Not an internal error; panic again.
|
|
||||||
panic(e)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
p := exporter{
|
|
||||||
fset: fset,
|
|
||||||
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
|
|
||||||
pkgIndex: make(map[*types.Package]int),
|
|
||||||
typIndex: make(map[types.Type]int),
|
|
||||||
posInfoFormat: true, // TODO(gri) might become a flag, eventually
|
|
||||||
}
|
|
||||||
|
|
||||||
// write version info
|
|
||||||
// The version string must start with "version %d" where %d is the version
|
|
||||||
// number. Additional debugging information may follow after a blank; that
|
|
||||||
// text is ignored by the importer.
|
|
||||||
p.rawStringln(fmt.Sprintf("version %d", exportVersion))
|
|
||||||
var debug string
|
|
||||||
if debugFormat {
|
|
||||||
debug = "debug"
|
|
||||||
}
|
|
||||||
p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
|
|
||||||
p.bool(trackAllTypes)
|
|
||||||
p.bool(p.posInfoFormat)
|
|
||||||
|
|
||||||
// --- generic export data ---
|
|
||||||
|
|
||||||
// populate type map with predeclared "known" types
|
|
||||||
for index, typ := range predeclared() {
|
|
||||||
p.typIndex[typ] = index
|
|
||||||
}
|
|
||||||
if len(p.typIndex) != len(predeclared()) {
|
|
||||||
return nil, internalError("duplicate entries in type map?")
|
|
||||||
}
|
|
||||||
|
|
||||||
// write package data
|
|
||||||
p.pkg(pkg, true)
|
|
||||||
if trace {
|
|
||||||
p.tracef("\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// write objects
|
|
||||||
objcount := 0
|
|
||||||
scope := pkg.Scope()
|
|
||||||
for _, name := range scope.Names() {
|
|
||||||
if !token.IsExported(name) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if trace {
|
|
||||||
p.tracef("\n")
|
|
||||||
}
|
|
||||||
p.obj(scope.Lookup(name))
|
|
||||||
objcount++
|
|
||||||
}
|
|
||||||
|
|
||||||
// indicate end of list
|
|
||||||
if trace {
|
|
||||||
p.tracef("\n")
|
|
||||||
}
|
|
||||||
p.tag(endTag)
|
|
||||||
|
|
||||||
// for self-verification only (redundant)
|
|
||||||
p.int(objcount)
|
|
||||||
|
|
||||||
if trace {
|
|
||||||
p.tracef("\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- end of export data ---
|
|
||||||
|
|
||||||
return p.out.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
|
|
||||||
if pkg == nil {
|
|
||||||
panic(internalError("unexpected nil pkg"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we saw the package before, write its index (>= 0)
|
|
||||||
if i, ok := p.pkgIndex[pkg]; ok {
|
|
||||||
p.index('P', i)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise, remember the package, write the package tag (< 0) and package data
|
|
||||||
if trace {
|
|
||||||
p.tracef("P%d = { ", len(p.pkgIndex))
|
|
||||||
defer p.tracef("} ")
|
|
||||||
}
|
|
||||||
p.pkgIndex[pkg] = len(p.pkgIndex)
|
|
||||||
|
|
||||||
p.tag(packageTag)
|
|
||||||
p.string(pkg.Name())
|
|
||||||
if emptypath {
|
|
||||||
p.string("")
|
|
||||||
} else {
|
|
||||||
p.string(pkg.Path())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) obj(obj types.Object) {
|
|
||||||
switch obj := obj.(type) {
|
|
||||||
case *types.Const:
|
|
||||||
p.tag(constTag)
|
|
||||||
p.pos(obj)
|
|
||||||
p.qualifiedName(obj)
|
|
||||||
p.typ(obj.Type())
|
|
||||||
p.value(obj.Val())
|
|
||||||
|
|
||||||
case *types.TypeName:
|
|
||||||
if obj.IsAlias() {
|
|
||||||
p.tag(aliasTag)
|
|
||||||
p.pos(obj)
|
|
||||||
p.qualifiedName(obj)
|
|
||||||
} else {
|
|
||||||
p.tag(typeTag)
|
|
||||||
}
|
|
||||||
p.typ(obj.Type())
|
|
||||||
|
|
||||||
case *types.Var:
|
|
||||||
p.tag(varTag)
|
|
||||||
p.pos(obj)
|
|
||||||
p.qualifiedName(obj)
|
|
||||||
p.typ(obj.Type())
|
|
||||||
|
|
||||||
case *types.Func:
|
|
||||||
p.tag(funcTag)
|
|
||||||
p.pos(obj)
|
|
||||||
p.qualifiedName(obj)
|
|
||||||
sig := obj.Type().(*types.Signature)
|
|
||||||
p.paramList(sig.Params(), sig.Variadic())
|
|
||||||
p.paramList(sig.Results(), false)
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) pos(obj types.Object) {
|
|
||||||
if !p.posInfoFormat {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
file, line := p.fileLine(obj)
|
|
||||||
if file == p.prevFile {
|
|
||||||
// common case: write line delta
|
|
||||||
// delta == 0 means different file or no line change
|
|
||||||
delta := line - p.prevLine
|
|
||||||
p.int(delta)
|
|
||||||
if delta == 0 {
|
|
||||||
p.int(-1) // -1 means no file change
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// different file
|
|
||||||
p.int(0)
|
|
||||||
// Encode filename as length of common prefix with previous
|
|
||||||
// filename, followed by (possibly empty) suffix. Filenames
|
|
||||||
// frequently share path prefixes, so this can save a lot
|
|
||||||
// of space and make export data size less dependent on file
|
|
||||||
// path length. The suffix is unlikely to be empty because
|
|
||||||
// file names tend to end in ".go".
|
|
||||||
n := commonPrefixLen(p.prevFile, file)
|
|
||||||
p.int(n) // n >= 0
|
|
||||||
p.string(file[n:]) // write suffix only
|
|
||||||
p.prevFile = file
|
|
||||||
p.int(line)
|
|
||||||
}
|
|
||||||
p.prevLine = line
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
|
|
||||||
if p.fset != nil {
|
|
||||||
pos := p.fset.Position(obj.Pos())
|
|
||||||
file = pos.Filename
|
|
||||||
line = pos.Line
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func commonPrefixLen(a, b string) int {
|
|
||||||
if len(a) > len(b) {
|
|
||||||
a, b = b, a
|
|
||||||
}
|
|
||||||
// len(a) <= len(b)
|
|
||||||
i := 0
|
|
||||||
for i < len(a) && a[i] == b[i] {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) qualifiedName(obj types.Object) {
|
|
||||||
p.string(obj.Name())
|
|
||||||
p.pkg(obj.Pkg(), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) typ(t types.Type) {
|
|
||||||
if t == nil {
|
|
||||||
panic(internalError("nil type"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Possible optimization: Anonymous pointer types *T where
|
|
||||||
// T is a named type are common. We could canonicalize all
|
|
||||||
// such types *T to a single type PT = *T. This would lead
|
|
||||||
// to at most one *T entry in typIndex, and all future *T's
|
|
||||||
// would be encoded as the respective index directly. Would
|
|
||||||
// save 1 byte (pointerTag) per *T and reduce the typIndex
|
|
||||||
// size (at the cost of a canonicalization map). We can do
|
|
||||||
// this later, without encoding format change.
|
|
||||||
|
|
||||||
// if we saw the type before, write its index (>= 0)
|
|
||||||
if i, ok := p.typIndex[t]; ok {
|
|
||||||
p.index('T', i)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise, remember the type, write the type tag (< 0) and type data
|
|
||||||
if trackAllTypes {
|
|
||||||
if trace {
|
|
||||||
p.tracef("T%d = {>\n", len(p.typIndex))
|
|
||||||
defer p.tracef("<\n} ")
|
|
||||||
}
|
|
||||||
p.typIndex[t] = len(p.typIndex)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch t := t.(type) {
|
|
||||||
case *types.Named:
|
|
||||||
if !trackAllTypes {
|
|
||||||
// if we don't track all types, track named types now
|
|
||||||
p.typIndex[t] = len(p.typIndex)
|
|
||||||
}
|
|
||||||
|
|
||||||
p.tag(namedTag)
|
|
||||||
p.pos(t.Obj())
|
|
||||||
p.qualifiedName(t.Obj())
|
|
||||||
p.typ(t.Underlying())
|
|
||||||
if !types.IsInterface(t) {
|
|
||||||
p.assocMethods(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
case *types.Array:
|
|
||||||
p.tag(arrayTag)
|
|
||||||
p.int64(t.Len())
|
|
||||||
p.typ(t.Elem())
|
|
||||||
|
|
||||||
case *types.Slice:
|
|
||||||
p.tag(sliceTag)
|
|
||||||
p.typ(t.Elem())
|
|
||||||
|
|
||||||
case *dddSlice:
|
|
||||||
p.tag(dddTag)
|
|
||||||
p.typ(t.elem)
|
|
||||||
|
|
||||||
case *types.Struct:
|
|
||||||
p.tag(structTag)
|
|
||||||
p.fieldList(t)
|
|
||||||
|
|
||||||
case *types.Pointer:
|
|
||||||
p.tag(pointerTag)
|
|
||||||
p.typ(t.Elem())
|
|
||||||
|
|
||||||
case *types.Signature:
|
|
||||||
p.tag(signatureTag)
|
|
||||||
p.paramList(t.Params(), t.Variadic())
|
|
||||||
p.paramList(t.Results(), false)
|
|
||||||
|
|
||||||
case *types.Interface:
|
|
||||||
p.tag(interfaceTag)
|
|
||||||
p.iface(t)
|
|
||||||
|
|
||||||
case *types.Map:
|
|
||||||
p.tag(mapTag)
|
|
||||||
p.typ(t.Key())
|
|
||||||
p.typ(t.Elem())
|
|
||||||
|
|
||||||
case *types.Chan:
|
|
||||||
p.tag(chanTag)
|
|
||||||
p.int(int(3 - t.Dir())) // hack
|
|
||||||
p.typ(t.Elem())
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic(internalErrorf("unexpected type %T: %s", t, t))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) assocMethods(named *types.Named) {
|
|
||||||
// Sort methods (for determinism).
|
|
||||||
var methods []*types.Func
|
|
||||||
for i := 0; i < named.NumMethods(); i++ {
|
|
||||||
methods = append(methods, named.Method(i))
|
|
||||||
}
|
|
||||||
sort.Sort(methodsByName(methods))
|
|
||||||
|
|
||||||
p.int(len(methods))
|
|
||||||
|
|
||||||
if trace && methods != nil {
|
|
||||||
p.tracef("associated methods {>\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, m := range methods {
|
|
||||||
if trace && i > 0 {
|
|
||||||
p.tracef("\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
p.pos(m)
|
|
||||||
name := m.Name()
|
|
||||||
p.string(name)
|
|
||||||
if !exported(name) {
|
|
||||||
p.pkg(m.Pkg(), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
sig := m.Type().(*types.Signature)
|
|
||||||
p.paramList(types.NewTuple(sig.Recv()), false)
|
|
||||||
p.paramList(sig.Params(), sig.Variadic())
|
|
||||||
p.paramList(sig.Results(), false)
|
|
||||||
p.int(0) // dummy value for go:nointerface pragma - ignored by importer
|
|
||||||
}
|
|
||||||
|
|
||||||
if trace && methods != nil {
|
|
||||||
p.tracef("<\n} ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type methodsByName []*types.Func
|
|
||||||
|
|
||||||
func (x methodsByName) Len() int { return len(x) }
|
|
||||||
func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
|
|
||||||
|
|
||||||
func (p *exporter) fieldList(t *types.Struct) {
|
|
||||||
if trace && t.NumFields() > 0 {
|
|
||||||
p.tracef("fields {>\n")
|
|
||||||
defer p.tracef("<\n} ")
|
|
||||||
}
|
|
||||||
|
|
||||||
p.int(t.NumFields())
|
|
||||||
for i := 0; i < t.NumFields(); i++ {
|
|
||||||
if trace && i > 0 {
|
|
||||||
p.tracef("\n")
|
|
||||||
}
|
|
||||||
p.field(t.Field(i))
|
|
||||||
p.string(t.Tag(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) field(f *types.Var) {
|
|
||||||
if !f.IsField() {
|
|
||||||
panic(internalError("field expected"))
|
|
||||||
}
|
|
||||||
|
|
||||||
p.pos(f)
|
|
||||||
p.fieldName(f)
|
|
||||||
p.typ(f.Type())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) iface(t *types.Interface) {
|
|
||||||
// TODO(gri): enable importer to load embedded interfaces,
|
|
||||||
// then emit Embeddeds and ExplicitMethods separately here.
|
|
||||||
p.int(0)
|
|
||||||
|
|
||||||
n := t.NumMethods()
|
|
||||||
if trace && n > 0 {
|
|
||||||
p.tracef("methods {>\n")
|
|
||||||
defer p.tracef("<\n} ")
|
|
||||||
}
|
|
||||||
p.int(n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
if trace && i > 0 {
|
|
||||||
p.tracef("\n")
|
|
||||||
}
|
|
||||||
p.method(t.Method(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) method(m *types.Func) {
|
|
||||||
sig := m.Type().(*types.Signature)
|
|
||||||
if sig.Recv() == nil {
|
|
||||||
panic(internalError("method expected"))
|
|
||||||
}
|
|
||||||
|
|
||||||
p.pos(m)
|
|
||||||
p.string(m.Name())
|
|
||||||
if m.Name() != "_" && !token.IsExported(m.Name()) {
|
|
||||||
p.pkg(m.Pkg(), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// interface method; no need to encode receiver.
|
|
||||||
p.paramList(sig.Params(), sig.Variadic())
|
|
||||||
p.paramList(sig.Results(), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) fieldName(f *types.Var) {
|
|
||||||
name := f.Name()
|
|
||||||
|
|
||||||
if f.Anonymous() {
|
|
||||||
// anonymous field - we distinguish between 3 cases:
|
|
||||||
// 1) field name matches base type name and is exported
|
|
||||||
// 2) field name matches base type name and is not exported
|
|
||||||
// 3) field name doesn't match base type name (alias name)
|
|
||||||
bname := basetypeName(f.Type())
|
|
||||||
if name == bname {
|
|
||||||
if token.IsExported(name) {
|
|
||||||
name = "" // 1) we don't need to know the field name or package
|
|
||||||
} else {
|
|
||||||
name = "?" // 2) use unexported name "?" to force package export
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// 3) indicate alias and export name as is
|
|
||||||
// (this requires an extra "@" but this is a rare case)
|
|
||||||
p.string("@")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p.string(name)
|
|
||||||
if name != "" && !token.IsExported(name) {
|
|
||||||
p.pkg(f.Pkg(), false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func basetypeName(typ types.Type) string {
|
|
||||||
switch typ := deref(typ).(type) {
|
|
||||||
case *types.Basic:
|
|
||||||
return typ.Name()
|
|
||||||
case *types.Named:
|
|
||||||
return typ.Obj().Name()
|
|
||||||
default:
|
|
||||||
return "" // unnamed type
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) paramList(params *types.Tuple, variadic bool) {
|
|
||||||
// use negative length to indicate unnamed parameters
|
|
||||||
// (look at the first parameter only since either all
|
|
||||||
// names are present or all are absent)
|
|
||||||
n := params.Len()
|
|
||||||
if n > 0 && params.At(0).Name() == "" {
|
|
||||||
n = -n
|
|
||||||
}
|
|
||||||
p.int(n)
|
|
||||||
for i := 0; i < params.Len(); i++ {
|
|
||||||
q := params.At(i)
|
|
||||||
t := q.Type()
|
|
||||||
if variadic && i == params.Len()-1 {
|
|
||||||
t = &dddSlice{t.(*types.Slice).Elem()}
|
|
||||||
}
|
|
||||||
p.typ(t)
|
|
||||||
if n > 0 {
|
|
||||||
name := q.Name()
|
|
||||||
p.string(name)
|
|
||||||
if name != "_" {
|
|
||||||
p.pkg(q.Pkg(), false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
p.string("") // no compiler-specific info
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) value(x constant.Value) {
|
|
||||||
if trace {
|
|
||||||
p.tracef("= ")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch x.Kind() {
|
|
||||||
case constant.Bool:
|
|
||||||
tag := falseTag
|
|
||||||
if constant.BoolVal(x) {
|
|
||||||
tag = trueTag
|
|
||||||
}
|
|
||||||
p.tag(tag)
|
|
||||||
|
|
||||||
case constant.Int:
|
|
||||||
if v, exact := constant.Int64Val(x); exact {
|
|
||||||
// common case: x fits into an int64 - use compact encoding
|
|
||||||
p.tag(int64Tag)
|
|
||||||
p.int64(v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// uncommon case: large x - use float encoding
|
|
||||||
// (powers of 2 will be encoded efficiently with exponent)
|
|
||||||
p.tag(floatTag)
|
|
||||||
p.float(constant.ToFloat(x))
|
|
||||||
|
|
||||||
case constant.Float:
|
|
||||||
p.tag(floatTag)
|
|
||||||
p.float(x)
|
|
||||||
|
|
||||||
case constant.Complex:
|
|
||||||
p.tag(complexTag)
|
|
||||||
p.float(constant.Real(x))
|
|
||||||
p.float(constant.Imag(x))
|
|
||||||
|
|
||||||
case constant.String:
|
|
||||||
p.tag(stringTag)
|
|
||||||
p.string(constant.StringVal(x))
|
|
||||||
|
|
||||||
case constant.Unknown:
|
|
||||||
// package contains type errors
|
|
||||||
p.tag(unknownTag)
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic(internalErrorf("unexpected value %v (%T)", x, x))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) float(x constant.Value) {
|
|
||||||
if x.Kind() != constant.Float {
|
|
||||||
panic(internalErrorf("unexpected constant %v, want float", x))
|
|
||||||
}
|
|
||||||
// extract sign (there is no -0)
|
|
||||||
sign := constant.Sign(x)
|
|
||||||
if sign == 0 {
|
|
||||||
// x == 0
|
|
||||||
p.int(0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// x != 0
|
|
||||||
|
|
||||||
var f big.Float
|
|
||||||
if v, exact := constant.Float64Val(x); exact {
|
|
||||||
// float64
|
|
||||||
f.SetFloat64(v)
|
|
||||||
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
|
|
||||||
// TODO(gri): add big.Rat accessor to constant.Value.
|
|
||||||
r := valueToRat(num)
|
|
||||||
f.SetRat(r.Quo(r, valueToRat(denom)))
|
|
||||||
} else {
|
|
||||||
// Value too large to represent as a fraction => inaccessible.
|
|
||||||
// TODO(gri): add big.Float accessor to constant.Value.
|
|
||||||
f.SetFloat64(math.MaxFloat64) // FIXME
|
|
||||||
}
|
|
||||||
|
|
||||||
// extract exponent such that 0.5 <= m < 1.0
|
|
||||||
var m big.Float
|
|
||||||
exp := f.MantExp(&m)
|
|
||||||
|
|
||||||
// extract mantissa as *big.Int
|
|
||||||
// - set exponent large enough so mant satisfies mant.IsInt()
|
|
||||||
// - get *big.Int from mant
|
|
||||||
m.SetMantExp(&m, int(m.MinPrec()))
|
|
||||||
mant, acc := m.Int(nil)
|
|
||||||
if acc != big.Exact {
|
|
||||||
panic(internalError("internal error"))
|
|
||||||
}
|
|
||||||
|
|
||||||
p.int(sign)
|
|
||||||
p.int(exp)
|
|
||||||
p.string(string(mant.Bytes()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func valueToRat(x constant.Value) *big.Rat {
|
|
||||||
// Convert little-endian to big-endian.
|
|
||||||
// I can't believe this is necessary.
|
|
||||||
bytes := constant.Bytes(x)
|
|
||||||
for i := 0; i < len(bytes)/2; i++ {
|
|
||||||
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
|
|
||||||
}
|
|
||||||
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) bool(b bool) bool {
|
|
||||||
if trace {
|
|
||||||
p.tracef("[")
|
|
||||||
defer p.tracef("= %v] ", b)
|
|
||||||
}
|
|
||||||
|
|
||||||
x := 0
|
|
||||||
if b {
|
|
||||||
x = 1
|
|
||||||
}
|
|
||||||
p.int(x)
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// Low-level encoders
|
|
||||||
|
|
||||||
func (p *exporter) index(marker byte, index int) {
|
|
||||||
if index < 0 {
|
|
||||||
panic(internalError("invalid index < 0"))
|
|
||||||
}
|
|
||||||
if debugFormat {
|
|
||||||
p.marker('t')
|
|
||||||
}
|
|
||||||
if trace {
|
|
||||||
p.tracef("%c%d ", marker, index)
|
|
||||||
}
|
|
||||||
p.rawInt64(int64(index))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) tag(tag int) {
|
|
||||||
if tag >= 0 {
|
|
||||||
panic(internalError("invalid tag >= 0"))
|
|
||||||
}
|
|
||||||
if debugFormat {
|
|
||||||
p.marker('t')
|
|
||||||
}
|
|
||||||
if trace {
|
|
||||||
p.tracef("%s ", tagString[-tag])
|
|
||||||
}
|
|
||||||
p.rawInt64(int64(tag))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) int(x int) {
|
|
||||||
p.int64(int64(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) int64(x int64) {
|
|
||||||
if debugFormat {
|
|
||||||
p.marker('i')
|
|
||||||
}
|
|
||||||
if trace {
|
|
||||||
p.tracef("%d ", x)
|
|
||||||
}
|
|
||||||
p.rawInt64(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *exporter) string(s string) {
|
|
||||||
if debugFormat {
|
|
||||||
p.marker('s')
|
|
||||||
}
|
|
||||||
if trace {
|
|
||||||
p.tracef("%q ", s)
|
|
||||||
}
|
|
||||||
// if we saw the string before, write its index (>= 0)
|
|
||||||
// (the empty string is mapped to 0)
|
|
||||||
if i, ok := p.strIndex[s]; ok {
|
|
||||||
p.rawInt64(int64(i))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// otherwise, remember string and write its negative length and bytes
|
|
||||||
p.strIndex[s] = len(p.strIndex)
|
|
||||||
p.rawInt64(-int64(len(s)))
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
p.rawByte(s[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// marker emits a marker byte and position information which makes
|
|
||||||
// it easy for a reader to detect if it is "out of sync". Used for
|
|
||||||
// debugFormat format only.
|
|
||||||
func (p *exporter) marker(m byte) {
|
|
||||||
p.rawByte(m)
|
|
||||||
// Enable this for help tracking down the location
|
|
||||||
// of an incorrect marker when running in debugFormat.
|
|
||||||
if false && trace {
|
|
||||||
p.tracef("#%d ", p.written)
|
|
||||||
}
|
|
||||||
p.rawInt64(int64(p.written))
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawInt64 should only be used by low-level encoders.
|
|
||||||
func (p *exporter) rawInt64(x int64) {
|
|
||||||
var tmp [binary.MaxVarintLen64]byte
|
|
||||||
n := binary.PutVarint(tmp[:], x)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
p.rawByte(tmp[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawStringln should only be used to emit the initial version string.
|
|
||||||
func (p *exporter) rawStringln(s string) {
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
p.rawByte(s[i])
|
|
||||||
}
|
|
||||||
p.rawByte('\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawByte is the bottleneck interface to write to p.out.
|
|
||||||
// rawByte escapes b as follows (any encoding does that
|
|
||||||
// hides '$'):
|
|
||||||
//
|
|
||||||
// '$' => '|' 'S'
|
|
||||||
// '|' => '|' '|'
|
|
||||||
//
|
|
||||||
// Necessary so other tools can find the end of the
|
|
||||||
// export data by searching for "$$".
|
|
||||||
// rawByte should only be used by low-level encoders.
|
|
||||||
func (p *exporter) rawByte(b byte) {
|
|
||||||
switch b {
|
|
||||||
case '$':
|
|
||||||
// write '$' as '|' 'S'
|
|
||||||
b = 'S'
|
|
||||||
fallthrough
|
|
||||||
case '|':
|
|
||||||
// write '|' as '|' '|'
|
|
||||||
p.out.WriteByte('|')
|
|
||||||
p.written++
|
|
||||||
}
|
|
||||||
p.out.WriteByte(b)
|
|
||||||
p.written++
|
|
||||||
}
|
|
||||||
|
|
||||||
// tracef is like fmt.Printf but it rewrites the format string
|
|
||||||
// to take care of indentation.
|
|
||||||
func (p *exporter) tracef(format string, args ...interface{}) {
|
|
||||||
if strings.ContainsAny(format, "<>\n") {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
for i := 0; i < len(format); i++ {
|
|
||||||
// no need to deal with runes
|
|
||||||
ch := format[i]
|
|
||||||
switch ch {
|
|
||||||
case '>':
|
|
||||||
p.indent++
|
|
||||||
continue
|
|
||||||
case '<':
|
|
||||||
p.indent--
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
buf.WriteByte(ch)
|
|
||||||
if ch == '\n' {
|
|
||||||
for j := p.indent; j > 0; j-- {
|
|
||||||
buf.WriteString(". ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
format = buf.String()
|
|
||||||
}
|
|
||||||
fmt.Printf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugging support.
|
|
||||||
// (tagString is only used when tracing is enabled)
|
|
||||||
var tagString = [...]string{
|
|
||||||
// Packages
|
|
||||||
-packageTag: "package",
|
|
||||||
|
|
||||||
// Types
|
|
||||||
-namedTag: "named type",
|
|
||||||
-arrayTag: "array",
|
|
||||||
-sliceTag: "slice",
|
|
||||||
-dddTag: "ddd",
|
|
||||||
-structTag: "struct",
|
|
||||||
-pointerTag: "pointer",
|
|
||||||
-signatureTag: "signature",
|
|
||||||
-interfaceTag: "interface",
|
|
||||||
-mapTag: "map",
|
|
||||||
-chanTag: "chan",
|
|
||||||
|
|
||||||
// Values
|
|
||||||
-falseTag: "false",
|
|
||||||
-trueTag: "true",
|
|
||||||
-int64Tag: "int64",
|
|
||||||
-floatTag: "float",
|
|
||||||
-fractionTag: "fraction",
|
|
||||||
-complexTag: "complex",
|
|
||||||
-stringTag: "string",
|
|
||||||
-unknownTag: "unknown",
|
|
||||||
|
|
||||||
// Type aliases
|
|
||||||
-aliasTag: "alias",
|
|
||||||
}
|
|
|
@@ -2,340 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
// This file contains the remaining vestiges of
// $GOROOT/src/go/internal/gcimporter/bimport.go.

package gcimporter

import (
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"go/types"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
||||||
|
|
||||||
type importer struct {
|
|
||||||
imports map[string]*types.Package
|
|
||||||
data []byte
|
|
||||||
importpath string
|
|
||||||
buf []byte // for reading strings
|
|
||||||
version int // export format version
|
|
||||||
|
|
||||||
// object lists
|
|
||||||
strList []string // in order of appearance
|
|
||||||
pathList []string // in order of appearance
|
|
||||||
pkgList []*types.Package // in order of appearance
|
|
||||||
typList []types.Type // in order of appearance
|
|
||||||
interfaceList []*types.Interface // for delayed completion only
|
|
||||||
trackAllTypes bool
|
|
||||||
|
|
||||||
// position encoding
|
|
||||||
posInfoFormat bool
|
|
||||||
prevFile string
|
|
||||||
prevLine int
|
|
||||||
fake fakeFileSet
|
|
||||||
|
|
||||||
// debugging support
|
|
||||||
debugFormat bool
|
|
||||||
read int // bytes read
|
|
||||||
}
|
|
||||||
|
|
||||||
// BImportData imports a package from the serialized package data
|
|
||||||
// and returns the number of bytes consumed and a reference to the package.
|
|
||||||
// If the export data version is not recognized or the format is otherwise
|
|
||||||
// compromised, an error is returned.
|
|
||||||
func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
|
||||||
// catch panics and return them as errors
|
|
||||||
const currentVersion = 6
|
|
||||||
version := -1 // unknown version
|
|
||||||
defer func() {
|
|
||||||
if e := recover(); e != nil {
|
|
||||||
// Return a (possibly nil or incomplete) package unchanged (see #16088).
|
|
||||||
if version > currentVersion {
|
|
||||||
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
p := importer{
|
|
||||||
imports: imports,
|
|
||||||
data: data,
|
|
||||||
importpath: path,
|
|
||||||
version: version,
|
|
||||||
strList: []string{""}, // empty string is mapped to 0
|
|
||||||
pathList: []string{""}, // empty string is mapped to 0
|
|
||||||
fake: fakeFileSet{
|
|
||||||
fset: fset,
|
|
||||||
files: make(map[string]*fileInfo),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
defer p.fake.setLines() // set lines for files in fset
|
|
||||||
|
|
||||||
// read version info
|
|
||||||
var versionstr string
|
|
||||||
if b := p.rawByte(); b == 'c' || b == 'd' {
|
|
||||||
// Go1.7 encoding; first byte encodes low-level
|
|
||||||
// encoding format (compact vs debug).
|
|
||||||
// For backward-compatibility only (avoid problems with
|
|
||||||
// old installed packages). Newly compiled packages use
|
|
||||||
// the extensible format string.
|
|
||||||
// TODO(gri) Remove this support eventually; after Go1.8.
|
|
||||||
if b == 'd' {
|
|
||||||
p.debugFormat = true
|
|
||||||
}
|
|
||||||
p.trackAllTypes = p.rawByte() == 'a'
|
|
||||||
p.posInfoFormat = p.int() != 0
|
|
||||||
versionstr = p.string()
|
|
||||||
if versionstr == "v1" {
|
|
||||||
version = 0
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Go1.8 extensible encoding
|
|
||||||
// read version string and extract version number (ignore anything after the version number)
|
|
||||||
versionstr = p.rawStringln(b)
|
|
||||||
if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
|
|
||||||
if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
|
|
||||||
version = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
p.version = version
|
|
||||||
|
|
||||||
// read version specific flags - extend as necessary
|
|
||||||
switch p.version {
|
|
||||||
// case currentVersion:
|
|
||||||
// ...
|
|
||||||
// fallthrough
|
|
||||||
case currentVersion, 5, 4, 3, 2, 1:
|
|
||||||
p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
|
|
||||||
p.trackAllTypes = p.int() != 0
|
|
||||||
p.posInfoFormat = p.int() != 0
|
|
||||||
case 0:
|
|
||||||
// Go1.7 encoding format - nothing to do here
|
|
||||||
default:
|
|
||||||
errorf("unknown bexport format version %d (%q)", p.version, versionstr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- generic export data ---
|
|
||||||
|
|
||||||
// populate typList with predeclared "known" types
|
|
||||||
p.typList = append(p.typList, predeclared()...)
|
|
||||||
|
|
||||||
// read package data
|
|
||||||
pkg = p.pkg()
|
|
||||||
|
|
||||||
// read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
|
|
||||||
objcount := 0
|
|
||||||
for {
|
|
||||||
tag := p.tagOrIndex()
|
|
||||||
if tag == endTag {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.obj(tag)
|
|
||||||
objcount++
|
|
||||||
}
|
|
||||||
|
|
||||||
// self-verification
|
|
||||||
if count := p.int(); count != objcount {
|
|
||||||
errorf("got %d objects; want %d", objcount, count)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ignore compiler-specific import data
|
|
||||||
|
|
||||||
// complete interfaces
|
|
||||||
// TODO(gri) re-investigate if we still need to do this in a delayed fashion
|
|
||||||
for _, typ := range p.interfaceList {
|
|
||||||
typ.Complete()
|
|
||||||
}
|
|
||||||
|
|
||||||
// record all referenced packages as imports
|
|
||||||
list := append(([]*types.Package)(nil), p.pkgList[1:]...)
|
|
||||||
sort.Sort(byPath(list))
|
|
||||||
pkg.SetImports(list)
|
|
||||||
|
|
||||||
// package was imported completely and without errors
|
|
||||||
pkg.MarkComplete()
|
|
||||||
|
|
||||||
return p.read, pkg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func errorf(format string, args ...interface{}) {
|
func errorf(format string, args ...interface{}) {
|
||||||
panic(fmt.Sprintf(format, args...))
|
panic(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *importer) pkg() *types.Package {
|
|
||||||
// if the package was seen before, i is its index (>= 0)
|
|
||||||
i := p.tagOrIndex()
|
|
||||||
if i >= 0 {
|
|
||||||
return p.pkgList[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise, i is the package tag (< 0)
|
|
||||||
if i != packageTag {
|
|
||||||
errorf("unexpected package tag %d version %d", i, p.version)
|
|
||||||
}
|
|
||||||
|
|
||||||
// read package data
|
|
||||||
name := p.string()
|
|
||||||
var path string
|
|
||||||
if p.version >= 5 {
|
|
||||||
path = p.path()
|
|
||||||
} else {
|
|
||||||
path = p.string()
|
|
||||||
}
|
|
||||||
if p.version >= 6 {
|
|
||||||
p.int() // package height; unused by go/types
|
|
||||||
}
|
|
||||||
|
|
||||||
// we should never see an empty package name
|
|
||||||
if name == "" {
|
|
||||||
errorf("empty package name in import")
|
|
||||||
}
|
|
||||||
|
|
||||||
// an empty path denotes the package we are currently importing;
|
|
||||||
// it must be the first package we see
|
|
||||||
if (path == "") != (len(p.pkgList) == 0) {
|
|
||||||
errorf("package path %q for pkg index %d", path, len(p.pkgList))
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the package was imported before, use that one; otherwise create a new one
|
|
||||||
if path == "" {
|
|
||||||
path = p.importpath
|
|
||||||
}
|
|
||||||
pkg := p.imports[path]
|
|
||||||
if pkg == nil {
|
|
||||||
pkg = types.NewPackage(path, name)
|
|
||||||
p.imports[path] = pkg
|
|
||||||
} else if pkg.Name() != name {
|
|
||||||
errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
|
|
||||||
}
|
|
||||||
p.pkgList = append(p.pkgList, pkg)
|
|
||||||
|
|
||||||
return pkg
|
|
||||||
}
|
|
||||||
|
|
||||||
// objTag returns the tag value for each object kind.
|
|
||||||
func objTag(obj types.Object) int {
|
|
||||||
switch obj.(type) {
|
|
||||||
case *types.Const:
|
|
||||||
return constTag
|
|
||||||
case *types.TypeName:
|
|
||||||
return typeTag
|
|
||||||
case *types.Var:
|
|
||||||
return varTag
|
|
||||||
case *types.Func:
|
|
||||||
return funcTag
|
|
||||||
default:
|
|
||||||
errorf("unexpected object: %v (%T)", obj, obj) // panics
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sameObj(a, b types.Object) bool {
|
|
||||||
// Because unnamed types are not canonicalized, we cannot simply compare types for
|
|
||||||
// (pointer) identity.
|
|
||||||
// Ideally we'd check equality of constant values as well, but this is good enough.
|
|
||||||
return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) declare(obj types.Object) {
|
|
||||||
pkg := obj.Pkg()
|
|
||||||
if alt := pkg.Scope().Insert(obj); alt != nil {
|
|
||||||
// This can only trigger if we import a (non-type) object a second time.
|
|
||||||
// Excluding type aliases, this cannot happen because 1) we only import a package
|
|
||||||
// once; and b) we ignore compiler-specific export data which may contain
|
|
||||||
// functions whose inlined function bodies refer to other functions that
|
|
||||||
// were already imported.
|
|
||||||
// However, type aliases require reexporting the original type, so we need
|
|
||||||
// to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
|
|
||||||
// method importer.obj, switch case importing functions).
|
|
||||||
// TODO(gri) review/update this comment once the gc compiler handles type aliases.
|
|
||||||
if !sameObj(obj, alt) {
|
|
||||||
errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) obj(tag int) {
|
|
||||||
switch tag {
|
|
||||||
case constTag:
|
|
||||||
pos := p.pos()
|
|
||||||
pkg, name := p.qualifiedName()
|
|
||||||
typ := p.typ(nil, nil)
|
|
||||||
val := p.value()
|
|
||||||
p.declare(types.NewConst(pos, pkg, name, typ, val))
|
|
||||||
|
|
||||||
case aliasTag:
|
|
||||||
// TODO(gri) verify type alias hookup is correct
|
|
||||||
pos := p.pos()
|
|
||||||
pkg, name := p.qualifiedName()
|
|
||||||
typ := p.typ(nil, nil)
|
|
||||||
p.declare(types.NewTypeName(pos, pkg, name, typ))
|
|
||||||
|
|
||||||
case typeTag:
|
|
||||||
p.typ(nil, nil)
|
|
||||||
|
|
||||||
case varTag:
|
|
||||||
pos := p.pos()
|
|
||||||
pkg, name := p.qualifiedName()
|
|
||||||
typ := p.typ(nil, nil)
|
|
||||||
p.declare(types.NewVar(pos, pkg, name, typ))
|
|
||||||
|
|
||||||
case funcTag:
|
|
||||||
pos := p.pos()
|
|
||||||
pkg, name := p.qualifiedName()
|
|
||||||
params, isddd := p.paramList()
|
|
||||||
result, _ := p.paramList()
|
|
||||||
sig := types.NewSignature(nil, params, result, isddd)
|
|
||||||
p.declare(types.NewFunc(pos, pkg, name, sig))
|
|
||||||
|
|
||||||
default:
|
|
||||||
errorf("unexpected object tag %d", tag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
|
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
|
||||||
|
|
||||||
func (p *importer) pos() token.Pos {
|
|
||||||
if !p.posInfoFormat {
|
|
||||||
return token.NoPos
|
|
||||||
}
|
|
||||||
|
|
||||||
file := p.prevFile
|
|
||||||
line := p.prevLine
|
|
||||||
delta := p.int()
|
|
||||||
line += delta
|
|
||||||
if p.version >= 5 {
|
|
||||||
if delta == deltaNewFile {
|
|
||||||
if n := p.int(); n >= 0 {
|
|
||||||
// file changed
|
|
||||||
file = p.path()
|
|
||||||
line = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if delta == 0 {
|
|
||||||
if n := p.int(); n >= 0 {
|
|
||||||
// file changed
|
|
||||||
file = p.prevFile[:n] + p.string()
|
|
||||||
line = p.int()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
p.prevFile = file
|
|
||||||
p.prevLine = line
|
|
||||||
|
|
||||||
return p.fake.pos(file, line, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Synthesize a token.Pos
|
// Synthesize a token.Pos
|
||||||
type fakeFileSet struct {
|
type fakeFileSet struct {
|
||||||
fset *token.FileSet
|
fset *token.FileSet
|
||||||
|
@@ -389,205 +73,6 @@ var (
|
||||||
fakeLinesOnce sync.Once
|
fakeLinesOnce sync.Once
|
||||||
)
|
)
|
||||||
|
|
||||||
func (p *importer) qualifiedName() (pkg *types.Package, name string) {
|
|
||||||
name = p.string()
|
|
||||||
pkg = p.pkg()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) record(t types.Type) {
|
|
||||||
p.typList = append(p.typList, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A dddSlice is a types.Type representing ...T parameters.
|
|
||||||
// It only appears for parameter types and does not escape
|
|
||||||
// the importer.
|
|
||||||
type dddSlice struct {
|
|
||||||
elem types.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *dddSlice) Underlying() types.Type { return t }
|
|
||||||
func (t *dddSlice) String() string { return "..." + t.elem.String() }
|
|
||||||
|
|
||||||
// parent is the package which declared the type; parent == nil means
|
|
||||||
// the package currently imported. The parent package is needed for
|
|
||||||
// exported struct fields and interface methods which don't contain
|
|
||||||
// explicit package information in the export data.
|
|
||||||
//
|
|
||||||
// A non-nil tname is used as the "owner" of the result type; i.e.,
|
|
||||||
// the result type is the underlying type of tname. tname is used
|
|
||||||
// to give interface methods a named receiver type where possible.
|
|
||||||
func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
|
|
||||||
// if the type was seen before, i is its index (>= 0)
|
|
||||||
i := p.tagOrIndex()
|
|
||||||
if i >= 0 {
|
|
||||||
return p.typList[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise, i is the type tag (< 0)
|
|
||||||
switch i {
|
|
||||||
case namedTag:
|
|
||||||
// read type object
|
|
||||||
pos := p.pos()
|
|
||||||
parent, name := p.qualifiedName()
|
|
||||||
scope := parent.Scope()
|
|
||||||
obj := scope.Lookup(name)
|
|
||||||
|
|
||||||
// if the object doesn't exist yet, create and insert it
|
|
||||||
if obj == nil {
|
|
||||||
obj = types.NewTypeName(pos, parent, name, nil)
|
|
||||||
scope.Insert(obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := obj.(*types.TypeName); !ok {
|
|
||||||
errorf("pkg = %s, name = %s => %s", parent, name, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
// associate new named type with obj if it doesn't exist yet
|
|
||||||
t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
|
|
||||||
|
|
||||||
// but record the existing type, if any
|
|
||||||
tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
|
|
||||||
p.record(tname)
|
|
||||||
|
|
||||||
// read underlying type
|
|
||||||
t0.SetUnderlying(p.typ(parent, t0))
|
|
||||||
|
|
||||||
// interfaces don't have associated methods
|
|
||||||
if types.IsInterface(t0) {
|
|
||||||
return tname
|
|
||||||
}
|
|
||||||
|
|
||||||
// read associated methods
|
|
||||||
for i := p.int(); i > 0; i-- {
|
|
||||||
// TODO(gri) replace this with something closer to fieldName
|
|
||||||
pos := p.pos()
|
|
||||||
name := p.string()
|
|
||||||
if !exported(name) {
|
|
||||||
p.pkg()
|
|
||||||
}
|
|
||||||
|
|
||||||
recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
|
|
||||||
params, isddd := p.paramList()
|
|
||||||
result, _ := p.paramList()
|
|
||||||
p.int() // go:nointerface pragma - discarded
|
|
||||||
|
|
||||||
sig := types.NewSignature(recv.At(0), params, result, isddd)
|
|
||||||
t0.AddMethod(types.NewFunc(pos, parent, name, sig))
|
|
||||||
}
|
|
||||||
|
|
||||||
return tname
|
|
||||||
|
|
||||||
case arrayTag:
|
|
||||||
t := new(types.Array)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
n := p.int64()
|
|
||||||
*t = *types.NewArray(p.typ(parent, nil), n)
|
|
||||||
return t
|
|
||||||
|
|
||||||
case sliceTag:
|
|
||||||
t := new(types.Slice)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
*t = *types.NewSlice(p.typ(parent, nil))
|
|
||||||
return t
|
|
||||||
|
|
||||||
case dddTag:
|
|
||||||
t := new(dddSlice)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.elem = p.typ(parent, nil)
|
|
||||||
return t
|
|
||||||
|
|
||||||
case structTag:
|
|
||||||
t := new(types.Struct)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
*t = *types.NewStruct(p.fieldList(parent))
|
|
||||||
return t
|
|
||||||
|
|
||||||
case pointerTag:
|
|
||||||
t := new(types.Pointer)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
*t = *types.NewPointer(p.typ(parent, nil))
|
|
||||||
return t
|
|
||||||
|
|
||||||
case signatureTag:
|
|
||||||
t := new(types.Signature)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
params, isddd := p.paramList()
|
|
||||||
result, _ := p.paramList()
|
|
||||||
*t = *types.NewSignature(nil, params, result, isddd)
|
|
||||||
return t
|
|
||||||
|
|
||||||
case interfaceTag:
|
|
||||||
// Create a dummy entry in the type list. This is safe because we
|
|
||||||
// cannot expect the interface type to appear in a cycle, as any
|
|
||||||
// such cycle must contain a named type which would have been
|
|
||||||
// first defined earlier.
|
|
||||||
// TODO(gri) Is this still true now that we have type aliases?
|
|
||||||
// See issue #23225.
|
|
||||||
n := len(p.typList)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
var embeddeds []types.Type
|
|
||||||
for n := p.int(); n > 0; n-- {
|
|
||||||
p.pos()
|
|
||||||
embeddeds = append(embeddeds, p.typ(parent, nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
t := newInterface(p.methodList(parent, tname), embeddeds)
|
|
||||||
p.interfaceList = append(p.interfaceList, t)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.typList[n] = t
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
|
|
||||||
case mapTag:
|
|
||||||
t := new(types.Map)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := p.typ(parent, nil)
|
|
||||||
val := p.typ(parent, nil)
|
|
||||||
*t = *types.NewMap(key, val)
|
|
||||||
return t
|
|
||||||
|
|
||||||
case chanTag:
|
|
||||||
t := new(types.Chan)
|
|
||||||
if p.trackAllTypes {
|
|
||||||
p.record(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
dir := chanDir(p.int())
|
|
||||||
val := p.typ(parent, nil)
|
|
||||||
*t = *types.NewChan(dir, val)
|
|
||||||
return t
|
|
||||||
|
|
||||||
default:
|
|
||||||
errorf("unexpected type tag %d", i) // panics
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func chanDir(d int) types.ChanDir {
|
func chanDir(d int) types.ChanDir {
|
||||||
// tag values must match the constants in cmd/compile/internal/gc/go.go
|
// tag values must match the constants in cmd/compile/internal/gc/go.go
|
||||||
switch d {
|
switch d {
|
||||||
|
@@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
|
|
||||||
if n := p.int(); n > 0 {
|
|
||||||
fields = make([]*types.Var, n)
|
|
||||||
tags = make([]string, n)
|
|
||||||
for i := range fields {
|
|
||||||
fields[i], tags[i] = p.field(parent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) field(parent *types.Package) (*types.Var, string) {
|
|
||||||
pos := p.pos()
|
|
||||||
pkg, name, alias := p.fieldName(parent)
|
|
||||||
typ := p.typ(parent, nil)
|
|
||||||
tag := p.string()
|
|
||||||
|
|
||||||
anonymous := false
|
|
||||||
if name == "" {
|
|
||||||
// anonymous field - typ must be T or *T and T must be a type name
|
|
||||||
switch typ := deref(typ).(type) {
|
|
||||||
case *types.Basic: // basic types are named types
|
|
||||||
pkg = nil // // objects defined in Universe scope have no package
|
|
||||||
name = typ.Name()
|
|
||||||
case *types.Named:
|
|
||||||
name = typ.Obj().Name()
|
|
||||||
default:
|
|
||||||
errorf("named base type expected")
|
|
||||||
}
|
|
||||||
anonymous = true
|
|
||||||
} else if alias {
|
|
||||||
// anonymous field: we have an explicit name because it's an alias
|
|
||||||
anonymous = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return types.NewField(pos, pkg, name, typ, anonymous), tag
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
|
|
||||||
if n := p.int(); n > 0 {
|
|
||||||
methods = make([]*types.Func, n)
|
|
||||||
for i := range methods {
|
|
||||||
methods[i] = p.method(parent, baseType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
|
|
||||||
pos := p.pos()
|
|
||||||
pkg, name, _ := p.fieldName(parent)
|
|
||||||
// If we don't have a baseType, use a nil receiver.
|
|
||||||
// A receiver using the actual interface type (which
|
|
||||||
// we don't know yet) will be filled in when we call
|
|
||||||
// types.Interface.Complete.
|
|
||||||
var recv *types.Var
|
|
||||||
if baseType != nil {
|
|
||||||
recv = types.NewVar(token.NoPos, parent, "", baseType)
|
|
||||||
}
|
|
||||||
params, isddd := p.paramList()
|
|
||||||
result, _ := p.paramList()
|
|
||||||
sig := types.NewSignature(recv, params, result, isddd)
|
|
||||||
return types.NewFunc(pos, pkg, name, sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
|
|
||||||
name = p.string()
|
|
||||||
pkg = parent
|
|
||||||
if pkg == nil {
|
|
||||||
// use the imported package instead
|
|
||||||
pkg = p.pkgList[0]
|
|
||||||
}
|
|
||||||
if p.version == 0 && name == "_" {
|
|
||||||
// version 0 didn't export a package for _ fields
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch name {
|
|
||||||
case "":
|
|
||||||
// 1) field name matches base type name and is exported: nothing to do
|
|
||||||
case "?":
|
|
||||||
// 2) field name matches base type name and is not exported: need package
|
|
||||||
name = ""
|
|
||||||
pkg = p.pkg()
|
|
||||||
case "@":
|
|
||||||
// 3) field name doesn't match type name (alias)
|
|
||||||
name = p.string()
|
|
||||||
alias = true
|
|
||||||
fallthrough
|
|
||||||
default:
|
|
||||||
if !exported(name) {
|
|
||||||
pkg = p.pkg()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) paramList() (*types.Tuple, bool) {
|
|
||||||
n := p.int()
|
|
||||||
if n == 0 {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
// negative length indicates unnamed parameters
|
|
||||||
named := true
|
|
||||||
if n < 0 {
|
|
||||||
n = -n
|
|
||||||
named = false
|
|
||||||
}
|
|
||||||
// n > 0
|
|
||||||
params := make([]*types.Var, n)
|
|
||||||
isddd := false
|
|
||||||
for i := range params {
|
|
||||||
params[i], isddd = p.param(named)
|
|
||||||
}
|
|
||||||
return types.NewTuple(params...), isddd
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) param(named bool) (*types.Var, bool) {
|
|
||||||
t := p.typ(nil, nil)
|
|
||||||
td, isddd := t.(*dddSlice)
|
|
||||||
if isddd {
|
|
||||||
t = types.NewSlice(td.elem)
|
|
||||||
}
|
|
||||||
|
|
||||||
var pkg *types.Package
|
|
||||||
var name string
|
|
||||||
if named {
|
|
||||||
name = p.string()
|
|
||||||
if name == "" {
|
|
||||||
errorf("expected named parameter")
|
|
||||||
}
|
|
||||||
if name != "_" {
|
|
||||||
pkg = p.pkg()
|
|
||||||
}
|
|
||||||
if i := strings.Index(name, "·"); i > 0 {
|
|
||||||
name = name[:i] // cut off gc-specific parameter numbering
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// read and discard compiler-specific info
|
|
||||||
p.string()
|
|
||||||
|
|
||||||
return types.NewVar(token.NoPos, pkg, name, t), isddd
|
|
||||||
}
|
|
||||||
|
|
||||||
func exported(name string) bool {
|
|
||||||
ch, _ := utf8.DecodeRuneInString(name)
|
|
||||||
return unicode.IsUpper(ch)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) value() constant.Value {
|
|
||||||
switch tag := p.tagOrIndex(); tag {
|
|
||||||
case falseTag:
|
|
||||||
return constant.MakeBool(false)
|
|
||||||
case trueTag:
|
|
||||||
return constant.MakeBool(true)
|
|
||||||
case int64Tag:
|
|
||||||
return constant.MakeInt64(p.int64())
|
|
||||||
case floatTag:
|
|
||||||
return p.float()
|
|
||||||
case complexTag:
|
|
||||||
re := p.float()
|
|
||||||
im := p.float()
|
|
||||||
return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
|
|
||||||
case stringTag:
|
|
||||||
return constant.MakeString(p.string())
|
|
||||||
case unknownTag:
|
|
||||||
return constant.MakeUnknown()
|
|
||||||
default:
|
|
||||||
errorf("unexpected value tag %d", tag) // panics
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) float() constant.Value {
|
|
||||||
sign := p.int()
|
|
||||||
if sign == 0 {
|
|
||||||
return constant.MakeInt64(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
exp := p.int()
|
|
||||||
mant := []byte(p.string()) // big endian
|
|
||||||
|
|
||||||
// remove leading 0's if any
|
|
||||||
for len(mant) > 0 && mant[0] == 0 {
|
|
||||||
mant = mant[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// convert to little endian
|
|
||||||
// TODO(gri) go/constant should have a more direct conversion function
|
|
||||||
// (e.g., once it supports a big.Float based implementation)
|
|
||||||
for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
|
|
||||||
mant[i], mant[j] = mant[j], mant[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// adjust exponent (constant.MakeFromBytes creates an integer value,
|
|
||||||
// but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
|
|
||||||
exp -= len(mant) << 3
|
|
||||||
if len(mant) > 0 {
|
|
||||||
for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
|
|
||||||
exp++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
x := constant.MakeFromBytes(mant)
|
|
||||||
switch {
|
|
||||||
case exp < 0:
|
|
||||||
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
|
|
||||||
x = constant.BinaryOp(x, token.QUO, d)
|
|
||||||
case exp > 0:
|
|
||||||
x = constant.Shift(x, token.SHL, uint(exp))
|
|
||||||
}
|
|
||||||
|
|
||||||
if sign < 0 {
|
|
||||||
x = constant.UnaryOp(token.SUB, x, 0)
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// Low-level decoders
|
|
||||||
|
|
||||||
func (p *importer) tagOrIndex() int {
|
|
||||||
if p.debugFormat {
|
|
||||||
p.marker('t')
|
|
||||||
}
|
|
||||||
|
|
||||||
return int(p.rawInt64())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) int() int {
|
|
||||||
x := p.int64()
|
|
||||||
if int64(int(x)) != x {
|
|
||||||
errorf("exported integer too large")
|
|
||||||
}
|
|
||||||
return int(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) int64() int64 {
|
|
||||||
if p.debugFormat {
|
|
||||||
p.marker('i')
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.rawInt64()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) path() string {
|
|
||||||
if p.debugFormat {
|
|
||||||
p.marker('p')
|
|
||||||
}
|
|
||||||
// if the path was seen before, i is its index (>= 0)
|
|
||||||
// (the empty string is at index 0)
|
|
||||||
i := p.rawInt64()
|
|
||||||
if i >= 0 {
|
|
||||||
return p.pathList[i]
|
|
||||||
}
|
|
||||||
// otherwise, i is the negative path length (< 0)
|
|
||||||
a := make([]string, -i)
|
|
||||||
for n := range a {
|
|
||||||
a[n] = p.string()
|
|
||||||
}
|
|
||||||
s := strings.Join(a, "/")
|
|
||||||
p.pathList = append(p.pathList, s)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) string() string {
|
|
||||||
if p.debugFormat {
|
|
||||||
p.marker('s')
|
|
||||||
}
|
|
||||||
// if the string was seen before, i is its index (>= 0)
|
|
||||||
// (the empty string is at index 0)
|
|
||||||
i := p.rawInt64()
|
|
||||||
if i >= 0 {
|
|
||||||
return p.strList[i]
|
|
||||||
}
|
|
||||||
// otherwise, i is the negative string length (< 0)
|
|
||||||
if n := int(-i); n <= cap(p.buf) {
|
|
||||||
p.buf = p.buf[:n]
|
|
||||||
} else {
|
|
||||||
p.buf = make([]byte, n)
|
|
||||||
}
|
|
||||||
for i := range p.buf {
|
|
||||||
p.buf[i] = p.rawByte()
|
|
||||||
}
|
|
||||||
s := string(p.buf)
|
|
||||||
p.strList = append(p.strList, s)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *importer) marker(want byte) {
|
|
||||||
if got := p.rawByte(); got != want {
|
|
||||||
errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
|
|
||||||
}
|
|
||||||
|
|
||||||
pos := p.read
|
|
||||||
if n := int(p.rawInt64()); n != pos {
|
|
||||||
errorf("incorrect position: got %d; want %d", n, pos)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawInt64 should only be used by low-level decoders.
|
|
||||||
func (p *importer) rawInt64() int64 {
|
|
||||||
i, err := binary.ReadVarint(p)
|
|
||||||
if err != nil {
|
|
||||||
errorf("read error: %v", err)
|
|
||||||
}
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawStringln should only be used to read the initial version string.
|
|
||||||
func (p *importer) rawStringln(b byte) string {
|
|
||||||
p.buf = p.buf[:0]
|
|
||||||
for b != '\n' {
|
|
||||||
p.buf = append(p.buf, b)
|
|
||||||
b = p.rawByte()
|
|
||||||
}
|
|
||||||
return string(p.buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// needed for binary.ReadVarint in rawInt64
|
|
||||||
func (p *importer) ReadByte() (byte, error) {
|
|
||||||
return p.rawByte(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// byte is the bottleneck interface for reading p.data.
|
|
||||||
// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
|
|
||||||
// rawByte should only be used by low-level decoders.
|
|
||||||
func (p *importer) rawByte() byte {
|
|
||||||
b := p.data[0]
|
|
||||||
r := 1
|
|
||||||
if b == '|' {
|
|
||||||
b = p.data[1]
|
|
||||||
r = 2
|
|
||||||
switch b {
|
|
||||||
case 'S':
|
|
||||||
b = '$'
|
|
||||||
case '|':
|
|
||||||
// nothing to do
|
|
||||||
default:
|
|
||||||
errorf("unexpected escape sequence in export data")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
p.data = p.data[r:]
|
|
||||||
p.read += r
|
|
||||||
return b
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
// Export format

// Tags. Must be < 0.
const (
    // Objects
    packageTag = -(iota + 1)
    constTag
    typeTag
    varTag
    funcTag
    endTag

    // Types
    namedTag
    arrayTag
    sliceTag
    dddTag
    structTag
    pointerTag
    signatureTag
    interfaceTag
    mapTag
    chanTag

    // Values
    falseTag
    trueTag
    int64Tag
    floatTag
    fractionTag // not used by gc
    complexTag
    stringTag
    nilTag     // only used by gc (appears in exported inlined function bodies)
    unknownTag // not used by gc (only appears in packages with errors)

    // Type aliases
    aliasTag
)

var predeclOnce sync.Once
var predecl []types.Type // initialized lazily
@@ -230,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
	// Or, define a new standard go/types/gcexportdata package.
	fset := token.NewFileSet()

	// The indexed export format starts with an 'i'; the older
	// Select appropriate importer.
	// binary export format starts with a 'c', 'd', or 'v'
	// (from "version"). Select appropriate importer.
	if len(data) > 0 {
		switch data[0] {
		case 'i':
		case 'v', 'c', 'd': // binary, till go1.10
			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])

		case 'i': // indexed, till go1.19
			_, pkg, err := IImportData(fset, packages, data[1:], id)
			return pkg, err

		case 'v', 'c', 'd':
		case 'u': // unified, from go1.20
			_, pkg, err := BImportData(fset, packages, data, id)
			return pkg, err

		case 'u':
			_, pkg, err := UImportData(fset, packages, data[1:size], id)
			return pkg, err
@@ -913,6 +913,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) {
		w.int64(int64(v.Kind()))
	}

	if v.Kind() == constant.Unknown {
		// golang/go#60605: treat unknown constant values as if they have invalid type
		//
		// This loses some fidelity over the package type-checked from source, but that
		// is acceptable.
		//
		// TODO(rfindley): we should switch on the recorded constant kind rather
		// than the constant type
		return
	}

	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
	case types.IsBoolean:
		w.bool(constant.BoolVal(v))
@@ -969,6 +980,16 @@ func constantToFloat(x constant.Value) *big.Float {
	return &f
}

func valueToRat(x constant.Value) *big.Rat {
	// Convert little-endian to big-endian.
	// I can't believe this is necessary.
	bytes := constant.Bytes(x)
	for i := 0; i < len(bytes)/2; i++ {
		bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
	}
	return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
}

// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
@@ -1178,3 +1199,12 @@ func (q *objQueue) popHead() types.Object {
	q.head++
	return obj
}

// internalError represents an error generated inside this package.
type internalError string

func (e internalError) Error() string { return "gcimporter: " + string(e) }

func internalErrorf(format string, args ...interface{}) error {
	return internalError(fmt.Sprintf(format, args...))
}
@@ -131,7 +131,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
		} else if version > currentVersion {
			err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
		} else {
			err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
			err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
		}
	}
}()

@@ -140,11 +140,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
	r := &intReader{bytes.NewReader(data), path}

	if bundle {
		bundleVersion := r.uint64()
		if v := r.uint64(); v != bundleVersion {
		switch bundleVersion {
			errorf("unknown bundle format version %d", v)
		case bundleVersion:
		default:
			errorf("unknown bundle format version %d", bundleVersion)
		}
	}
@@ -10,6 +10,7 @@
package gcimporter

import (
	"fmt"
	"go/token"
	"go/types"
	"sort"

@@ -63,6 +64,14 @@ type typeInfo struct {
}

func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
	if !debug {
		defer func() {
			if x := recover(); x != nil {
				err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
			}
		}()
	}

	s := string(data)
	s = s[:strings.LastIndex(s, "\n$$\n")]
	input := pkgbits.NewPkgDecoder(path, s)
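The `if !debug` guard above converts importer panics into returned errors. A generic sketch of that pattern, with hypothetical names, looks like this:

```go
package main

import "fmt"

// do converts any panic in body into a returned error, mirroring the guard
// UImportData installs when debug is off.
func do(body func()) (err error) {
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("internal error: %v", x)
		}
	}()
	body()
	return nil
}

func main() {
	err := do(func() { panic("boom") })
	fmt.Println(err) // internal error: boom
}
```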
@@ -8,10 +8,12 @@ package gocommand
import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"reflect"
	"regexp"
	"runtime"
	"strconv"

@@ -22,6 +24,9 @@ import (
	exec "golang.org/x/sys/execabs"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
	"golang.org/x/tools/internal/event/tag"
)
// A Runner will run go command invocations and serialize

@@ -51,9 +56,19 @@ func (runner *Runner) initialize() {
// 1.14: go: updating go.mod: existing contents have changed since last read
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)

// verb is an event label for the go command verb.
var verb = keys.NewString("verb", "go command verb")

func invLabels(inv Invocation) []label.Label {
	return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
}

// Run is a convenience wrapper around RunRaw.
// It returns only stdout and a "friendly" error.
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
	ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
	defer done()

	stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
	return stdout, friendly
}

@@ -61,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e
// RunPiped runs the invocation serially, always waiting for any concurrent
// invocations to complete first.
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
	ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
	defer done()

	_, err := runner.runPiped(ctx, inv, stdout, stderr)
	return err
}

@@ -68,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde
// RunRaw runs the invocation, serializing requests only if they fight over
// go.mod changes.
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
	ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
	defer done()
	// Make sure the runner is always initialized.
	runner.initialize()
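For context, a minimal sketch of how a caller drives this Runner API; the Invocation fields used are the ones visible in this diff (Verb, Args, WorkingDir), and since gocommand is internal to x/tools this only compiles from within that module:

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	"golang.org/x/tools/internal/gocommand"
)

func listModules(ctx context.Context, dir string) (*bytes.Buffer, error) {
	runner := &gocommand.Runner{}
	inv := gocommand.Invocation{
		Verb:       "list",
		Args:       []string{"-m", "all"},
		WorkingDir: dir,
	}
	// Run serializes conflicting invocations and returns stdout plus a
	// "friendly" error, as described in the comments above.
	return runner.Run(ctx, inv)
}

func main() {
	out, err := listModules(context.Background(), ".")
	if err != nil {
		fmt.Println("go list failed:", err)
		return
	}
	fmt.Print(out.String())
}
```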
@@ -215,6 +235,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
	cmd := exec.Command("go", goArgs...)
	cmd.Stdout = stdout
	cmd.Stderr = stderr

	// cmd.WaitDelay was added only in go1.20 (see #50436).
	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
		// https://go.dev/issue/59541: don't wait forever copying stderr
		// after the command has exited.
		// After CL 484741 we copy stdout manually, so we'll stop reading that as
		// soon as ctx is done. However, we also don't want to wait around forever
		// for stderr. Give a much-longer-than-reasonable delay and then assume that
		// something has wedged in the kernel or runtime.
		waitDelay.Set(reflect.ValueOf(30 * time.Second))
	}

	// On darwin the cwd gets resolved to the real path, which breaks anything that
	// expects the working directory to keep the original path, including the
	// go command when dealing with modules.

@@ -229,6 +261,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
		cmd.Dir = i.WorkingDir
	}

	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())

	return runCmdContext(ctx, cmd)
@ -242,10 +275,85 @@ var DebugHangingGoCommands = false
|
||||||
|
|
||||||
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
|
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
|
||||||
// before os.Kill.
|
// before os.Kill.
|
||||||
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
|
func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
|
||||||
if err := cmd.Start(); err != nil {
|
// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
|
||||||
|
// copy it to the Writer in a goroutine until the process has finished and
|
||||||
|
// either the pipe reaches EOF or command's WaitDelay expires.
|
||||||
|
//
|
||||||
|
// However, the output from 'go list' can be quite large, and we don't want to
|
||||||
|
// keep reading (and allocating buffers) if we've already decided we don't
|
||||||
|
// care about the output. We don't want to wait for the process to finish, and
|
||||||
|
// we don't wait to wait for the WaitDelay to expire either.
|
||||||
|
//
|
||||||
|
// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
|
||||||
|
// it with a pipe (which is an *os.File), which we can close in order to stop
|
||||||
|
// copying output as soon as we realize we don't care about it.
|
||||||
|
var stdoutW *os.File
|
||||||
|
if cmd.Stdout != nil {
|
||||||
|
if _, ok := cmd.Stdout.(*os.File); !ok {
|
||||||
|
var stdoutR *os.File
|
||||||
|
stdoutR, stdoutW, err = os.Pipe()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prevStdout := cmd.Stdout
|
||||||
|
cmd.Stdout = stdoutW
|
||||||
|
|
||||||
|
stdoutErr := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
_, err := io.Copy(prevStdout, stdoutR)
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("copying stdout: %w", err)
|
||||||
|
}
|
||||||
|
stdoutErr <- err
|
||||||
|
}()
|
||||||
|
defer func() {
|
||||||
|
// We started a goroutine to copy a stdout pipe.
|
||||||
|
// Wait for it to finish, or terminate it if need be.
|
||||||
|
var err2 error
|
||||||
|
select {
|
||||||
|
case err2 = <-stdoutErr:
|
||||||
|
stdoutR.Close()
|
||||||
|
case <-ctx.Done():
|
||||||
|
stdoutR.Close()
|
||||||
|
// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
|
||||||
|
// should cause the Read call in io.Copy to unblock and return
|
||||||
|
// immediately, but we still need to receive from stdoutErr to confirm
|
||||||
|
// that that has happened.
|
||||||
|
<-stdoutErr
|
||||||
|
err2 = ctx.Err()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = err2
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
|
||||||
|
// same writer, and have a type that can be compared with ==, at most
|
||||||
|
// one goroutine at a time will call Write.”
|
||||||
|
//
|
||||||
|
// Since we're starting a goroutine that writes to cmd.Stdout, we must
|
||||||
|
// also update cmd.Stderr so that that still holds.
|
||||||
|
func() {
|
||||||
|
defer func() { recover() }()
|
||||||
|
if cmd.Stderr == prevStdout {
|
||||||
|
cmd.Stderr = cmd.Stdout
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = cmd.Start()
|
||||||
|
if stdoutW != nil {
|
||||||
|
// The child process has inherited the pipe file,
|
||||||
|
// so close the copy held in this process.
|
||||||
|
stdoutW.Close()
|
||||||
|
stdoutW = nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
resChan := make(chan error, 1)
|
resChan := make(chan error, 1)
|
||||||
go func() {
|
go func() {
|
||||||
resChan <- cmd.Wait()
|
resChan <- cmd.Wait()
|
||||||
|
@ -253,11 +361,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
|
||||||
|
|
||||||
// If we're interested in debugging hanging Go commands, stop waiting after a
|
// If we're interested in debugging hanging Go commands, stop waiting after a
|
||||||
// minute and panic with interesting information.
|
// minute and panic with interesting information.
|
||||||
if DebugHangingGoCommands {
|
debug := DebugHangingGoCommands
|
||||||
|
if debug {
|
||||||
|
timer := time.NewTimer(1 * time.Minute)
|
||||||
|
defer timer.Stop()
|
||||||
select {
|
select {
|
||||||
case err := <-resChan:
|
case err := <-resChan:
|
||||||
return err
|
return err
|
||||||
case <-time.After(1 * time.Minute):
|
case <-timer.C:
|
||||||
HandleHangingGoCommand(cmd.Process)
|
HandleHangingGoCommand(cmd.Process)
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
}
|
}
|
||||||
|
@ -270,30 +381,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cancelled. Interrupt and see if it ends voluntarily.
|
// Cancelled. Interrupt and see if it ends voluntarily.
|
||||||
cmd.Process.Signal(os.Interrupt)
|
if err := cmd.Process.Signal(os.Interrupt); err == nil {
|
||||||
select {
|
// (We used to wait only 1s but this proved
|
||||||
case err := <-resChan:
|
// fragile on loaded builder machines.)
|
||||||
return err
|
timer := time.NewTimer(5 * time.Second)
|
||||||
case <-time.After(time.Second):
|
defer timer.Stop()
|
||||||
|
select {
|
||||||
|
case err := <-resChan:
|
||||||
|
return err
|
||||||
|
case <-timer.C:
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Didn't shut down in response to interrupt. Kill it hard.
|
// Didn't shut down in response to interrupt. Kill it hard.
|
||||||
// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
|
// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
|
||||||
// on certain platforms, such as unix.
|
// on certain platforms, such as unix.
|
||||||
if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands {
|
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
|
||||||
// Don't panic here as this reliably fails on windows with EINVAL.
|
|
||||||
log.Printf("error killing the Go command: %v", err)
|
log.Printf("error killing the Go command: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// See above: don't wait indefinitely if we're debugging hanging Go commands.
|
|
||||||
if DebugHangingGoCommands {
|
|
||||||
select {
|
|
||||||
case err := <-resChan:
|
|
||||||
return err
|
|
||||||
case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill
|
|
||||||
HandleHangingGoCommand(cmd.Process)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return <-resChan
|
return <-resChan
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -23,21 +23,11 @@ import (
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
	inv.Verb = "list"
	inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
	inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off")
	inv.BuildFlags = nil // This is not a build command.
	// Unset any unneeded flags, and remove them from BuildFlags, if they're
	// present.
	inv.ModFile = ""
	inv.ModFlag = ""
	var buildFlags []string
	inv.ModFile = ""
	for _, flag := range inv.BuildFlags {
	inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")
		// Flags can be prefixed by one or two dashes.
		f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-")
		if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") {
			continue
		}
		buildFlags = append(buildFlags, flag)
	}
	inv.BuildFlags = buildFlags
	stdoutBytes, err := r.Run(ctx, inv)
	if err != nil {
		return 0, err
@ -105,6 +105,26 @@ func OriginMethod(fn *types.Func) *types.Func {
|
||||||
}
|
}
|
||||||
orig := NamedTypeOrigin(named)
|
orig := NamedTypeOrigin(named)
|
||||||
gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
|
gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
|
||||||
|
|
||||||
|
// This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
|
||||||
|
// package p
|
||||||
|
// type T *int
|
||||||
|
// func (*T) f() {}
|
||||||
|
// LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}.
|
||||||
|
// Here we make them consistent by force.
|
||||||
|
// (The go/types bug is general, but this workaround is reached only
|
||||||
|
// for generic T thanks to the early return above.)
|
||||||
|
if gfn == nil {
|
||||||
|
mset := types.NewMethodSet(types.NewPointer(orig))
|
||||||
|
for i := 0; i < mset.Len(); i++ {
|
||||||
|
m := mset.At(i)
|
||||||
|
if m.Obj().Id() == fn.Id() {
|
||||||
|
gfn = m.Obj()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return gfn.(*types.Func)
|
return gfn.(*types.Func)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -11,8 +11,6 @@ import (
	"go/types"
	"reflect"
	"unsafe"

	"golang.org/x/tools/go/types/objectpath"
)

func SetUsesCgo(conf *types.Config) bool {

@@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}

var SetGoVersion = func(conf *types.Config, version string) bool { return false }

// NewObjectpathEncoder returns a function closure equivalent to
// objectpath.For but amortized for multiple (sequential) calls.
// It is a temporary workaround, pending the approval of proposal 58668.
//
//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor
func NewObjectpathFunc() func(types.Object) (objectpath.Path, error)
@@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the

## Installation

With [Go module][] support (Go 1.11+), simply add the following import
Simply add the following import to your code, and then `go [build|run|test]`
will automatically fetch the necessary dependencies:

```go
import "google.golang.org/grpc"
```

to your code, and then `go [build|run|test]` will automatically fetch the
necessary dependencies.

Otherwise, to install the `grpc-go` package, run the following command:

```console
$ go get -u google.golang.org/grpc
```

> **Note:** If you are trying to access `grpc-go` from **China**, see the
> [FAQ](#FAQ) below.

@@ -56,15 +49,6 @@ To build Go code, there are several options:

- Set up a VPN and access google.golang.org through that.

- Without Go module support: `git clone` the repo manually:

  ```sh
  git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
  ```

  You will need to do the same for all of grpc's dependencies in `golang.org`,
  e.g. `golang.org/x/net`.

- With Go module support: it is possible to use the `replace` feature of `go
  mod` to create aliases for golang.org packages. In your project's directory:

@@ -76,33 +60,13 @@ To build Go code, there are several options:
  ```

  Again, this will need to be done for all transitive dependencies hosted on
  golang.org as well. For details, refer to [golang/go issue
  #28652](https://github.com/golang/go/issues/28652).

### Compiling error, undefined: grpc.SupportPackageIsVersion

#### If you are using Go modules:
Please update to the latest version of gRPC-Go using
`go get google.golang.org/grpc`.

Ensure your gRPC-Go version is `require`d at the appropriate version in
the same module containing the generated `.pb.go` files. For example,
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:

```go
module <your module name>

require (
    google.golang.org/grpc v1.27.0
)
```

#### If you are *not* using Go modules:

Update the `proto` package, gRPC package, and rebuild the `.proto` files:

```sh
go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
go get -u google.golang.org/grpc
protoc --go_out=plugins=grpc:. *.proto
```

### How to turn on logging

@@ -121,9 +85,11 @@ possible reasons, including:
1. mis-configured transport credentials, connection failed on handshaking
1. bytes disrupted, possibly by a proxy in between
1. server shutdown
1. Keepalive parameters caused connection shutdown, for example if you have
   configured your server to terminate connections regularly to [trigger DNS
   lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
   If this is the case, you may want to increase your
   [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
   to allow longer RPC calls to finish.
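The `MaxConnectionAgeGrace` setting referenced above lives in the `keepalive` package; a minimal server-side sketch (durations are illustrative only):

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      30 * time.Minute, // periodically recycle connections (e.g. to re-resolve DNS)
		MaxConnectionAgeGrace: 5 * time.Minute,  // give in-flight RPCs time to finish before closing
	}))
}

func main() {
	s := newServer()
	defer s.Stop()
	// register services and call s.Serve(lis) as usual.
}
```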

It can be tricky to debug this because the error happens on the client side but
@ -34,26 +34,26 @@ import (
|
||||||
// key/value pairs. Keys must be hashable, and users should define their own
|
// key/value pairs. Keys must be hashable, and users should define their own
|
||||||
// types for keys. Values should not be modified after they are added to an
|
// types for keys. Values should not be modified after they are added to an
|
||||||
// Attributes or if they were received from one. If values implement 'Equal(o
|
// Attributes or if they were received from one. If values implement 'Equal(o
|
||||||
// interface{}) bool', it will be called by (*Attributes).Equal to determine
|
// any) bool', it will be called by (*Attributes).Equal to determine whether
|
||||||
// whether two values with the same key should be considered equal.
|
// two values with the same key should be considered equal.
|
||||||
type Attributes struct {
|
type Attributes struct {
|
||||||
m map[interface{}]interface{}
|
m map[any]any
|
||||||
}
|
}
|
||||||
|
|
||||||
// New returns a new Attributes containing the key/value pair.
|
// New returns a new Attributes containing the key/value pair.
|
||||||
func New(key, value interface{}) *Attributes {
|
func New(key, value any) *Attributes {
|
||||||
return &Attributes{m: map[interface{}]interface{}{key: value}}
|
return &Attributes{m: map[any]any{key: value}}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithValue returns a new Attributes containing the previous keys and values
|
// WithValue returns a new Attributes containing the previous keys and values
|
||||||
// and the new key/value pair. If the same key appears multiple times, the
|
// and the new key/value pair. If the same key appears multiple times, the
|
||||||
// last value overwrites all previous values for that key. To remove an
|
// last value overwrites all previous values for that key. To remove an
|
||||||
// existing key, use a nil value. value should not be modified later.
|
// existing key, use a nil value. value should not be modified later.
|
||||||
func (a *Attributes) WithValue(key, value interface{}) *Attributes {
|
func (a *Attributes) WithValue(key, value any) *Attributes {
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return New(key, value)
|
return New(key, value)
|
||||||
}
|
}
|
||||||
n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
|
n := &Attributes{m: make(map[any]any, len(a.m)+1)}
|
||||||
for k, v := range a.m {
|
for k, v := range a.m {
|
||||||
n.m[k] = v
|
n.m[k] = v
|
||||||
}
|
}
|
||||||
|
@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes {
|
||||||
|
|
||||||
// Value returns the value associated with these attributes for key, or nil if
|
// Value returns the value associated with these attributes for key, or nil if
|
||||||
// no value is associated with key. The returned value should not be modified.
|
// no value is associated with key. The returned value should not be modified.
|
||||||
func (a *Attributes) Value(key interface{}) interface{} {
|
func (a *Attributes) Value(key any) any {
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return a.m[key]
|
return a.m[key]
|
||||||
}
|
}
|
||||||
|
|
||||||
// Equal returns whether a and o are equivalent. If 'Equal(o interface{})
|
// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
|
||||||
// bool' is implemented for a value in the attributes, it is called to
|
// implemented for a value in the attributes, it is called to determine if the
|
||||||
// determine if the value matches the one stored in the other attributes. If
|
// value matches the one stored in the other attributes. If Equal is not
|
||||||
// Equal is not implemented, standard equality is used to determine if the two
|
// implemented, standard equality is used to determine if the two values are
|
||||||
// values are equal. Note that some types (e.g. maps) aren't comparable by
|
// equal. Note that some types (e.g. maps) aren't comparable by default, so
|
||||||
// default, so they must be wrapped in a struct, or in an alias type, with Equal
|
// they must be wrapped in a struct, or in an alias type, with Equal defined.
|
||||||
// defined.
|
|
||||||
func (a *Attributes) Equal(o *Attributes) bool {
|
func (a *Attributes) Equal(o *Attributes) bool {
|
||||||
if a == nil && o == nil {
|
if a == nil && o == nil {
|
||||||
return true
|
return true
|
||||||
|
@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool {
|
||||||
// o missing element of a
|
// o missing element of a
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
|
if eq, ok := v.(interface{ Equal(o any) bool }); ok {
|
||||||
if !eq.Equal(ov) {
|
if !eq.Equal(ov) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -112,19 +111,31 @@ func (a *Attributes) String() string {
|
||||||
sb.WriteString("{")
|
sb.WriteString("{")
|
||||||
first := true
|
first := true
|
||||||
for k, v := range a.m {
|
for k, v := range a.m {
|
||||||
var key, val string
|
|
||||||
if str, ok := k.(interface{ String() string }); ok {
|
|
||||||
key = str.String()
|
|
||||||
}
|
|
||||||
if str, ok := v.(interface{ String() string }); ok {
|
|
||||||
val = str.String()
|
|
||||||
}
|
|
||||||
if !first {
|
if !first {
|
||||||
sb.WriteString(", ")
|
sb.WriteString(", ")
|
||||||
}
|
}
|
||||||
sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
|
sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
|
||||||
first = false
|
first = false
|
||||||
}
|
}
|
||||||
sb.WriteString("}")
|
sb.WriteString("}")
|
||||||
return sb.String()
|
return sb.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func str(x any) string {
|
||||||
|
if v, ok := x.(fmt.Stringer); ok {
|
||||||
|
return v.String()
|
||||||
|
} else if v, ok := x.(string); ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("<%p>", x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
|
||||||
|
// the Attributes correctly when printing (via pretty.JSON) structs containing
|
||||||
|
// Attributes as fields.
|
||||||
|
//
|
||||||
|
// It is impossible to unmarshal attributes from a JSON representation and this
|
||||||
|
// method is meant only for debugging purposes.
|
||||||
|
func (a *Attributes) MarshalJSON() ([]byte, error) {
|
||||||
|
return []byte(a.String()), nil
|
||||||
|
}
|
||||||
|
|
|
@ -105,8 +105,8 @@ type SubConn interface {
|
||||||
//
|
//
|
||||||
// This will trigger a state transition for the SubConn.
|
// This will trigger a state transition for the SubConn.
|
||||||
//
|
//
|
||||||
// Deprecated: This method is now part of the ClientConn interface and will
|
// Deprecated: this method will be removed. Create new SubConns for new
|
||||||
// eventually be removed from here.
|
// addresses instead.
|
||||||
UpdateAddresses([]resolver.Address)
|
UpdateAddresses([]resolver.Address)
|
||||||
// Connect starts the connecting for this SubConn.
|
// Connect starts the connecting for this SubConn.
|
||||||
Connect()
|
Connect()
|
||||||
|
@ -115,6 +115,13 @@ type SubConn interface {
|
||||||
// creates a new one and returns it. Returns a close function which must
|
// creates a new one and returns it. Returns a close function which must
|
||||||
// be called when the Producer is no longer needed.
|
// be called when the Producer is no longer needed.
|
||||||
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
|
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
|
||||||
|
// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
|
||||||
|
// allowed to complete. No future calls should be made on the SubConn.
|
||||||
|
// One final state update will be delivered to the StateListener (or
|
||||||
|
// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
|
||||||
|
// indicate the shutdown operation. This may be delivered before
|
||||||
|
// in-progress RPCs are complete and the actual connection is closed.
|
||||||
|
Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSubConnOptions contains options to create new SubConn.

@@ -129,6 +136,11 @@ type NewSubConnOptions struct {
	// HealthCheckEnabled indicates whether health check service should be
	// enabled on this SubConn
	HealthCheckEnabled bool
	// StateListener is called when the state of the subconn changes. If nil,
	// Balancer.UpdateSubConnState will be called instead. Will never be
	// invoked until after Connect() is called on the SubConn created with
	// these options.
	StateListener func(SubConnState)
}
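Taken together, the new `StateListener` option and `Shutdown` method replace `Balancer.UpdateSubConnState` and `ClientConn.RemoveSubConn`; a hypothetical helper inside a balancer implementation might wire them up like this:

```go
package lbexample

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// newConn is an assumed helper a Balancer could use; cc and addr come from
// the balancer's UpdateClientConnState call.
func newConn(cc balancer.ClientConn, addr resolver.Address, handle func(balancer.SubConn, balancer.SubConnState)) (balancer.SubConn, error) {
	var sc balancer.SubConn
	sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
		// Preferred over implementing Balancer.UpdateSubConnState.
		StateListener: func(s balancer.SubConnState) { handle(sc, s) },
	})
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
	// When the address goes away, call sc.Shutdown() instead of the
	// deprecated cc.RemoveSubConn(sc).
}
```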
|
|
||||||
// State contains the balancer's state relevant to the gRPC ClientConn.
|
// State contains the balancer's state relevant to the gRPC ClientConn.
|
||||||
|
@ -150,16 +162,24 @@ type ClientConn interface {
|
||||||
// NewSubConn is called by balancer to create a new SubConn.
|
// NewSubConn is called by balancer to create a new SubConn.
|
||||||
// It doesn't block and wait for the connections to be established.
|
// It doesn't block and wait for the connections to be established.
|
||||||
// Behaviors of the SubConn can be controlled by options.
|
// Behaviors of the SubConn can be controlled by options.
|
||||||
|
//
|
||||||
|
// Deprecated: please be aware that in a future version, SubConns will only
|
||||||
|
// support one address per SubConn.
|
||||||
NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
|
NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
|
||||||
// RemoveSubConn removes the SubConn from ClientConn.
|
// RemoveSubConn removes the SubConn from ClientConn.
|
||||||
// The SubConn will be shutdown.
|
// The SubConn will be shutdown.
|
||||||
|
//
|
||||||
|
// Deprecated: use SubConn.Shutdown instead.
|
||||||
RemoveSubConn(SubConn)
|
RemoveSubConn(SubConn)
|
||||||
// UpdateAddresses updates the addresses used in the passed in SubConn.
|
// UpdateAddresses updates the addresses used in the passed in SubConn.
|
||||||
// gRPC checks if the currently connected address is still in the new list.
|
// gRPC checks if the currently connected address is still in the new list.
|
||||||
// If so, the connection will be kept. Else, the connection will be
|
// If so, the connection will be kept. Else, the connection will be
|
||||||
// gracefully closed, and a new connection will be created.
|
// gracefully closed, and a new connection will be created.
|
||||||
//
|
//
|
||||||
// This will trigger a state transition for the SubConn.
|
// This may trigger a state transition for the SubConn.
|
||||||
|
//
|
||||||
|
// Deprecated: this method will be removed. Create new SubConns for new
|
||||||
|
// addresses instead.
|
||||||
UpdateAddresses(SubConn, []resolver.Address)
|
UpdateAddresses(SubConn, []resolver.Address)
|
||||||
|
|
||||||
// UpdateState notifies gRPC that the balancer's internal state has
|
// UpdateState notifies gRPC that the balancer's internal state has
|
||||||
|
@ -250,7 +270,7 @@ type DoneInfo struct {
|
||||||
// trailing metadata.
|
// trailing metadata.
|
||||||
//
|
//
|
||||||
// The only supported type now is *orca_v3.LoadReport.
|
// The only supported type now is *orca_v3.LoadReport.
|
||||||
ServerLoad interface{}
|
ServerLoad any
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -343,9 +363,13 @@ type Balancer interface {
|
||||||
ResolverError(error)
|
ResolverError(error)
|
||||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||||
// changes.
|
// changes.
|
||||||
|
//
|
||||||
|
// Deprecated: Use NewSubConnOptions.StateListener when creating the
|
||||||
|
// SubConn instead.
|
||||||
UpdateSubConnState(SubConn, SubConnState)
|
UpdateSubConnState(SubConn, SubConnState)
|
||||||
// Close closes the balancer. The balancer is not required to call
|
// Close closes the balancer. The balancer is not currently required to
|
||||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
// call SubConn.Shutdown for its existing SubConns; however, this will be
|
||||||
|
// required in a future release, so it is recommended.
|
||||||
Close()
|
Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -390,15 +414,14 @@ var ErrBadResolverState = errors.New("bad resolver state")
|
||||||
type ProducerBuilder interface {
|
type ProducerBuilder interface {
|
||||||
// Build creates a Producer. The first parameter is always a
|
// Build creates a Producer. The first parameter is always a
|
||||||
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
|
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
|
||||||
// associated SubConn), but is declared as interface{} to avoid a
|
// associated SubConn), but is declared as `any` to avoid a dependency
|
||||||
// dependency cycle. Should also return a close function that will be
|
// cycle. Should also return a close function that will be called when all
|
||||||
// called when all references to the Producer have been given up.
|
// references to the Producer have been given up.
|
||||||
Build(grpcClientConnInterface interface{}) (p Producer, close func())
|
Build(grpcClientConnInterface any) (p Producer, close func())
|
||||||
}
|
}
|
||||||
|
|
||||||
// A Producer is a type shared among potentially many consumers. It is
|
// A Producer is a type shared among potentially many consumers. It is
|
||||||
// associated with a SubConn, and an implementation will typically contain
|
// associated with a SubConn, and an implementation will typically contain
|
||||||
// other methods to provide additional functionality, e.g. configuration or
|
// other methods to provide additional functionality, e.g. configuration or
|
||||||
// subscription registration.
|
// subscription registration.
|
||||||
type Producer interface {
|
type Producer any
|
||||||
}
|
|
||||||
|
|
|
@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||||
addrsSet.Set(a, nil)
|
addrsSet.Set(a, nil)
|
||||||
if _, ok := b.subConns.Get(a); !ok {
|
if _, ok := b.subConns.Get(a); !ok {
|
||||||
// a is a new address (not existing in b.subConns).
|
// a is a new address (not existing in b.subConns).
|
||||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
|
var sc balancer.SubConn
|
||||||
|
opts := balancer.NewSubConnOptions{
|
||||||
|
HealthCheckEnabled: b.config.HealthCheck,
|
||||||
|
StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
|
||||||
|
}
|
||||||
|
sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||||
continue
|
continue
|
||||||
|
@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||||
sc := sci.(balancer.SubConn)
|
sc := sci.(balancer.SubConn)
|
||||||
// a was removed by resolver.
|
// a was removed by resolver.
|
||||||
if _, ok := addrsSet.Get(a); !ok {
|
if _, ok := addrsSet.Get(a); !ok {
|
||||||
b.cc.RemoveSubConn(sc)
|
sc.Shutdown()
|
||||||
b.subConns.Delete(a)
|
b.subConns.Delete(a)
|
||||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||||
// The entry will be deleted in UpdateSubConnState.
|
// The entry will be deleted in updateSubConnState.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// If resolver state contains no addresses, return an error so ClientConn
|
// If resolver state contains no addresses, return an error so ClientConn
|
||||||
|
@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() {
|
||||||
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
|
||||||
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||||
|
logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||||
s := state.ConnectivityState
|
s := state.ConnectivityState
|
||||||
if logger.V(2) {
|
if logger.V(2) {
|
||||||
logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
|
logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
|
||||||
|
@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||||
case connectivity.Idle:
|
case connectivity.Idle:
|
||||||
sc.Connect()
|
sc.Connect()
|
||||||
case connectivity.Shutdown:
|
case connectivity.Shutdown:
|
||||||
// When an address was removed by resolver, b called RemoveSubConn but
|
// When an address was removed by resolver, b called Shutdown but kept
|
||||||
// kept the sc's state in scStates. Remove state for this sc here.
|
// the sc's state in scStates. Remove state for this sc here.
|
||||||
delete(b.scStates, sc)
|
delete(b.scStates, sc)
|
||||||
case connectivity.TransientFailure:
|
case connectivity.TransientFailure:
|
||||||
// Save error to be reported via picker.
|
// Save error to be reported via picker.
|
||||||
|
@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||||
// and it doesn't need to call RemoveSubConn for the SubConns.
|
// and it doesn't need to call Shutdown for the SubConns.
|
||||||
func (b *baseBalancer) Close() {
|
func (b *baseBalancer) Close() {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
|
||||||
// lock held. But the lock guards only the scheduling part. The actual
|
// lock held. But the lock guards only the scheduling part. The actual
|
||||||
// callback is called asynchronously without the lock being held.
|
// callback is called asynchronously without the lock being held.
|
||||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||||
// If the addresses specified in the update contain addresses of type
|
|
||||||
// "grpclb" and the selected LB policy is not "grpclb", these addresses
|
|
||||||
// will be filtered out and ccs will be modified with the updated
|
|
||||||
// address list.
|
|
||||||
if ccb.curBalancerName != grpclbName {
|
|
||||||
var addrs []resolver.Address
|
|
||||||
for _, addr := range ccs.ResolverState.Addresses {
|
|
||||||
if addr.Type == resolver.GRPCLB {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
addrs = append(addrs, addr)
|
|
||||||
}
|
|
||||||
ccs.ResolverState.Addresses = addrs
|
|
||||||
}
|
|
||||||
errCh <- ccb.balancer.UpdateClientConnState(*ccs)
|
errCh <- ccb.balancer.UpdateClientConnState(*ccs)
|
||||||
})
|
})
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
|
||||||
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
|
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
|
||||||
ccb.mu.Lock()
|
ccb.mu.Lock()
|
||||||
ccb.serializer.Schedule(func(_ context.Context) {
|
ccb.serializer.Schedule(func(_ context.Context) {
|
||||||
ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
|
// Even though it is optional for balancers, gracefulswitch ensures
|
||||||
|
// opts.StateListener is set, so this cannot ever be nil.
|
||||||
|
sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
|
||||||
})
|
})
|
||||||
ccb.mu.Unlock()
|
ccb.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
|
||||||
}
|
}
|
||||||
|
|
||||||
ccb.mode = m
|
ccb.mode = m
|
||||||
done := ccb.serializer.Done
|
done := ccb.serializer.Done()
|
||||||
b := ccb.balancer
|
b := ccb.balancer
|
||||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||||
// Close the serializer to ensure that no more calls from gRPC are sent
|
// Close the serializer to ensure that no more calls from gRPC are sent
|
||||||
|
@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
|
||||||
}
|
}
|
||||||
ccb.mu.Unlock()
|
ccb.mu.Unlock()
|
||||||
|
|
||||||
// Give enqueued callbacks a chance to finish.
|
// Give enqueued callbacks a chance to finish before closing the balancer.
|
||||||
<-done
|
<-done
|
||||||
// Spawn a goroutine to close the balancer (since it may block trying to
|
b.Close()
|
||||||
// cleanup all allocated resources) and return early.
|
|
||||||
go b.Close()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// exitIdleMode is invoked by grpc when the channel exits idle mode either
|
// exitIdleMode is invoked by grpc when the channel exits idle mode either
|
||||||
|
@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
|
||||||
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
|
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
|
acbw := &acBalancerWrapper{
|
||||||
|
ccb: ccb,
|
||||||
|
ac: ac,
|
||||||
|
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
|
||||||
|
stateListener: opts.StateListener,
|
||||||
|
}
|
||||||
ac.acbw = acbw
|
ac.acbw = acbw
|
||||||
return acbw, nil
|
return acbw, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||||
if ccb.isIdleOrClosed() {
|
// The graceful switch balancer will never call this.
|
||||||
// It it safe to ignore this call when the balancer is closed or in idle
|
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
|
||||||
// because the ClientConn takes care of closing the connections.
|
|
||||||
//
|
|
||||||
// Not returning early from here when the balancer is closed or in idle
|
|
||||||
// leads to a deadlock though, because of the following sequence of
|
|
||||||
// calls when holding cc.mu:
|
|
||||||
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
|
|
||||||
// ccb.RemoveAddrConn --> cc.removeAddrConn
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
acbw, ok := sc.(*acBalancerWrapper)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
||||||
|
@ -380,7 +356,9 @@ func (ccb *ccBalancerWrapper) Target() string {
|
||||||
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
||||||
// It implements balancer.SubConn interface.
|
// It implements balancer.SubConn interface.
|
||||||
type acBalancerWrapper struct {
|
type acBalancerWrapper struct {
|
||||||
ac *addrConn // read-only
|
ac *addrConn // read-only
|
||||||
|
ccb *ccBalancerWrapper // read-only
|
||||||
|
stateListener func(balancer.SubConnState)
|
||||||
|
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
producers map[balancer.ProducerBuilder]*refCountedProducer
|
producers map[balancer.ProducerBuilder]*refCountedProducer
|
||||||
|
@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() {
|
||||||
go acbw.ac.connect()
|
go acbw.ac.connect()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (acbw *acBalancerWrapper) Shutdown() {
|
||||||
|
ccb := acbw.ccb
|
||||||
|
if ccb.isIdleOrClosed() {
|
||||||
|
// It is safe to ignore this call when the balancer is closed or in idle
|
||||||
|
// because the ClientConn takes care of closing the connections.
|
||||||
|
//
|
||||||
|
// Not returning early from here when the balancer is closed or in idle
|
||||||
|
 		// leads to a deadlock though, because of the following sequence of
 		// calls when holding cc.mu:
 		// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
 		// ccb.RemoveAddrConn --> cc.removeAddrConn
 		return
 	}
 	ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
 }

 // NewStream begins a streaming RPC on the addrConn. If the addrConn is not
 // ready, blocks until it is or ctx expires. Returns an error when the context
 // expires or the addrConn is shut down.
@@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc,
 // Invoke performs a unary RPC. If the addrConn is not ready, returns
 // errSubConnNotReady.
-func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
 	cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
 	if err != nil {
 		return err

@@ -18,7 +18,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.30.0
+// 	protoc-gen-go v1.31.0
 // 	protoc        v4.22.0
 // source: grpc/binlog/v1/binarylog.proto

@@ -26,12 +26,7 @@ import (
 // received. This is typically called by generated code.
 //
 // All errors returned by Invoke are compatible with the status package.
-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
-	if err := cc.idlenessMgr.onCallBegin(); err != nil {
-		return err
-	}
-	defer cc.idlenessMgr.onCallEnd()
-
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
 	// allow interceptor to see all applicable call options, which means those
 	// configured as defaults from dial option as well as per-call options
 	opts = combine(cc.dopts.callOptions, opts)

@@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption {
 // received. This is typically called by generated code.
 //
 // DEPRECATED: Use ClientConn.Invoke instead.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
 	return cc.Invoke(ctx, method, args, reply, opts...)
 }

 var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}

-func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
 	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
 	if err != nil {
 		return err
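Most of the signature churn in this vendored gRPC update is the replacement of interface{} with its Go 1.18 alias any. Because any is defined as type any = interface{}, the two spellings are fully interchangeable and existing callers keep compiling. A minimal standalone illustration (not part of the vendored code):

package main

import "fmt"

// printAll accepts values of any type; `any` is an alias for `interface{}`,
// so this signature is interchangeable with `args ...interface{}`.
func printAll(args ...any) {
	for _, a := range args {
		fmt.Println(a)
	}
}

func main() {
	var v interface{} = "hello" // assignable to any and vice versa
	printAll(v, 42, true)
}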
@@ -34,9 +34,12 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/idle"
+	"google.golang.org/grpc/internal/pretty"
 	iresolver "google.golang.org/grpc/internal/resolver"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"

@@ -53,8 +56,6 @@ import (
 const (
 	// minimum time to give a connection to complete
 	minConnectTimeout = 20 * time.Second
-	// must match grpclbName in grpclb/grpclb.go
-	grpclbName = "grpclb"
 )

 var (

@@ -137,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
 func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
 	cc := &ClientConn{
 		target: target,
-		csMgr:  &connectivityStateManager{},
 		conns:  make(map[*addrConn]struct{}),
 		dopts:  defaultDialOptions(),
 		czData: new(channelzData),

@@ -190,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	// Register ClientConn with channelz.
 	cc.channelzRegistration(target)

+	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
+
 	if err := cc.validateTransportCredentials(); err != nil {
 		return nil, err
 	}

@@ -265,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	// Configure idleness support with configured idle timeout or default idle
 	// timeout duration. Idleness can be explicitly disabled by the user, by
 	// setting the dial option to 0.
-	cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout)
+	cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger})

 	// Return early for non-blocking dials.
 	if !cc.dopts.block {

@@ -316,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) {
 	channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
 }

+type idler ClientConn
+
+func (i *idler) EnterIdleMode() error {
+	return (*ClientConn)(i).enterIdleMode()
+}
+
+func (i *idler) ExitIdleMode() error {
+	return (*ClientConn)(i).exitIdleMode()
+}
+
 // exitIdleMode moves the channel out of idle mode by recreating the name
 // resolver and load balancer.
 func (cc *ClientConn) exitIdleMode() error {
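The new idle.Manager apparently expects an enforcer with exported EnterIdleMode/ExitIdleMode methods, and the vendored code supplies one by converting *ClientConn to the locally defined idler type rather than exporting those methods. A hedged, self-contained sketch of that named-type-conversion adapter pattern (the server/loggingServer names are invented for illustration):

package main

import "fmt"

type server struct{ name string }

func (s *server) start() { fmt.Println("starting", s.name) }

// loggingServer shares server's underlying struct, so *server converts to
// *loggingServer (and back) without copying; only the method set changes.
type loggingServer server

func (l *loggingServer) Start() {
	fmt.Println("about to start")
	(*server)(l).start() // delegate to the original type's method
}

func main() {
	s := &server{name: "demo"}
	(*loggingServer)(s).Start()
}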
@@ -326,7 +338,7 @@ func (cc *ClientConn) exitIdleMode() error {
 	}
 	if cc.idlenessState != ccIdlenessStateIdle {
 		cc.mu.Unlock()
-		logger.Info("ClientConn asked to exit idle mode when not in idle mode")
+		channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
 		return nil
 	}

@@ -349,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error {
 	cc.idlenessState = ccIdlenessStateExitingIdle
 	exitedIdle := false
 	if cc.blockingpicker == nil {
-		cc.blockingpicker = newPickerWrapper()
+		cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers)
 	} else {
 		cc.blockingpicker.exitIdleMode()
 		exitedIdle = true

@@ -397,7 +409,8 @@ func (cc *ClientConn) enterIdleMode() error {
 		return ErrClientConnClosing
 	}
 	if cc.idlenessState != ccIdlenessStateActive {
-		logger.Error("ClientConn asked to enter idle mode when not active")
+		channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
+		cc.mu.Unlock()
 		return nil
 	}

@@ -474,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error {
 func (cc *ClientConn) channelzRegistration(target string) {
 	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
 	cc.addTraceEvent("created")
-	cc.csMgr.channelzID = cc.channelzID
 }

 // chainUnaryClientInterceptors chains all unary client interceptors into one.

@@ -491,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) {
 	} else if len(interceptors) == 1 {
 		chainedInt = interceptors[0]
 	} else {
-		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+		chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
 			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
 		}
 	}

@@ -503,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final
 	if curr == len(interceptors)-1 {
 		return finalInvoker
 	}
-	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
 		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
 	}
 }

@@ -539,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr
 	}
 }

+// newConnectivityStateManager creates an connectivityStateManager with
+// the specified id.
+func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager {
+	return &connectivityStateManager{
+		channelzID: id,
+		pubSub:     grpcsync.NewPubSub(ctx),
+	}
+}
+
 // connectivityStateManager keeps the connectivity.State of ClientConn.
 // This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state at two places.
 type connectivityStateManager struct {
 	mu         sync.Mutex
 	state      connectivity.State
 	notifyChan chan struct{}
 	channelzID *channelz.Identifier
+	pubSub     *grpcsync.PubSub
 }

 // updateState updates the connectivity.State of ClientConn.

@@ -561,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
 		return
 	}
 	csm.state = state
+	csm.pubSub.Publish(state)
+
 	channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
 	if csm.notifyChan != nil {
 		// There are other goroutines waiting on this channel.

@@ -590,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
 type ClientConnInterface interface {
 	// Invoke performs a unary RPC and returns after the response is received
 	// into reply.
-	Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
+	Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error
 	// NewStream begins a streaming RPC.
 	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
 }

@@ -622,7 +650,7 @@ type ClientConn struct {
 	channelzID      *channelz.Identifier // Channelz identifier for the channel.
 	resolverBuilder resolver.Builder     // See parseTargetAndFindResolver().
 	balancerWrapper *ccBalancerWrapper   // Uses gracefulswitch.balancer underneath.
-	idlenessMgr     idlenessManager
+	idlenessMgr     idle.Manager

 	// The following provide their own synchronization, and therefore don't
 	// require cc.mu to be held to access them.

@@ -668,6 +696,19 @@ const (
 	ccIdlenessStateExitingIdle
 )

+func (s ccIdlenessState) String() string {
+	switch s {
+	case ccIdlenessStateActive:
+		return "active"
+	case ccIdlenessStateIdle:
+		return "idle"
+	case ccIdlenessStateExitingIdle:
+		return "exitingIdle"
+	default:
+		return "unknown"
+	}
+}
+
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
 // ctx expires. A true value is returned in former case and false in latter.
 //

@@ -759,6 +800,10 @@ func init() {
 		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
 	}
 	emptyServiceConfig = cfg.Config.(*ServiceConfig)
+
+	internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
+		return cc.csMgr.pubSub.Subscribe(s)
+	}
 }

 func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
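The SubscribeToConnectivityStateChanges hook above is internal to gRPC; application code cannot use it directly. From the outside, the same connectivity transitions can be observed with the public (experimental) GetState and WaitForStateChange methods on ClientConn. A hedged sketch, with a placeholder target address:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

// watchState logs every connectivity transition until ctx is done.
func watchState(ctx context.Context, cc *grpc.ClientConn) {
	state := cc.GetState()
	for {
		log.Printf("connectivity: %v", state)
		if state == connectivity.Shutdown || !cc.WaitForStateChange(ctx, state) {
			return // channel closed or ctx expired
		}
		state = cc.GetState()
	}
}

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	watchState(ctx, cc)
}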
@@ -867,6 +912,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
 	cc.balancerWrapper.updateSubConnState(sc, s, err)
 }

+// Makes a copy of the input addresses slice and clears out the balancer
+// attributes field. Addresses are passed during subconn creation and address
+// update operations. In both cases, we will clear the balancer attributes by
+// calling this function, and therefore we will be able to use the Equal method
+// provided by the resolver.Address type for comparison.
+func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+	out := make([]resolver.Address, len(in))
+	for i := range in {
+		out[i] = in[i]
+		out[i].BalancerAttributes = nil
+	}
+	return out
+}
+
 // newAddrConn creates an addrConn for addrs and adds it to cc.conns.
 //
 // Caller needs to make sure len(addrs) > 0.
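The helper above exists because resolver.Address.Equal compares every field, including BalancerAttributes, so two addresses that point at the same endpoint still compare unequal once a balancer attaches its own metadata. A small hedged illustration (the address and attribute values are invented):

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	a := resolver.Address{Addr: "10.0.0.1:443"}
	b := resolver.Address{
		Addr:               "10.0.0.1:443",
		BalancerAttributes: attributes.New("weight", 3), // balancer-only metadata
	}

	fmt.Println(a.Equal(b)) // false: BalancerAttributes differ

	b.BalancerAttributes = nil
	fmt.Println(a.Equal(b)) // true: same endpoint once balancer metadata is cleared
}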
@@ -874,7 +933,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
 	ac := &addrConn{
 		state:  connectivity.Idle,
 		cc:     cc,
-		addrs:  addrs,
+		addrs:  copyAddressesWithoutBalancerAttributes(addrs),
 		scopts: opts,
 		dopts:  cc.dopts,
 		czData: new(channelzData),

@@ -995,8 +1054,9 @@ func equalAddresses(a, b []resolver.Address) bool {
 // connections or connection attempts.
 func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 	ac.mu.Lock()
-	channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+	channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))

+	addrs = copyAddressesWithoutBalancerAttributes(addrs)
 	if equalAddresses(ac.addrs, addrs) {
 		ac.mu.Unlock()
 		return

@@ -1031,8 +1091,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 	ac.cancel()
 	ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)

-	// We have to defer here because GracefulClose => Close => onClose, which
-	// requires locking ac.mu.
+	// We have to defer here because GracefulClose => onClose, which requires
+	// locking ac.mu.
 	if ac.transport != nil {
 		defer ac.transport.GracefulClose()
 		ac.transport = nil

@@ -1137,23 +1197,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
 	}

 	var newBalancerName string
-	if cc.sc != nil && cc.sc.lbConfig != nil {
+	if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) {
+		// No service config or no LB policy specified in config.
+		newBalancerName = PickFirstBalancerName
+	} else if cc.sc.lbConfig != nil {
 		newBalancerName = cc.sc.lbConfig.name
-	} else {
-		var isGRPCLB bool
-		for _, a := range addrs {
-			if a.Type == resolver.GRPCLB {
-				isGRPCLB = true
-				break
-			}
-		}
-		if isGRPCLB {
-			newBalancerName = grpclbName
-		} else if cc.sc != nil && cc.sc.LB != nil {
-			newBalancerName = *cc.sc.LB
-		} else {
-			newBalancerName = PickFirstBalancerName
-		}
+	} else { // cc.sc.LB != nil
+		newBalancerName = *cc.sc.LB
 	}
 	cc.balancerWrapper.switchTo(newBalancerName)
 }

@@ -1192,7 +1242,10 @@ func (cc *ClientConn) ResetConnectBackoff() {

 // Close tears down the ClientConn and all underlying connections.
 func (cc *ClientConn) Close() error {
-	defer cc.cancel()
+	defer func() {
+		cc.cancel()
+		<-cc.csMgr.pubSub.Done()
+	}()

 	cc.mu.Lock()
 	if cc.conns == nil {

@@ -1226,7 +1279,7 @@ func (cc *ClientConn) Close() error {
 		rWrapper.close()
 	}
 	if idlenessMgr != nil {
-		idlenessMgr.close()
+		idlenessMgr.Close()
 	}

 	for ac := range conns {

@@ -1336,12 +1389,14 @@ func (ac *addrConn) resetTransport() {

 	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
 		ac.cc.resolveNow(resolver.ResolveNowOptions{})
-		// After exhausting all addresses, the addrConn enters
-		// TRANSIENT_FAILURE.
+		ac.mu.Lock()
 		if acCtx.Err() != nil {
+			// addrConn was torn down.
+			ac.mu.Unlock()
 			return
 		}
-		ac.mu.Lock()
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
 		ac.updateConnectivityState(connectivity.TransientFailure, err)

 		// Backoff.

@@ -1537,7 +1592,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {

 	// Set up the health check helper functions.
 	currentTr := ac.transport
-	newStream := func(method string) (interface{}, error) {
+	newStream := func(method string) (any, error) {
 		ac.mu.Lock()
 		if ac.transport != currentTr {
 			ac.mu.Unlock()

@@ -1625,16 +1680,7 @@ func (ac *addrConn) tearDown(err error) {
 	ac.updateConnectivityState(connectivity.Shutdown, nil)
 	ac.cancel()
 	ac.curAddr = resolver.Address{}
-	if err == errConnDrain && curTr != nil {
-		// GracefulClose(...) may be executed multiple times when
-		// i) receiving multiple GoAway frames from the server; or
-		// ii) there are concurrent name resolver/Balancer triggered
-		// address removal and GoAway.
-		// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
-		ac.mu.Unlock()
-		curTr.GracefulClose()
-		ac.mu.Lock()
-	}
 	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
 		Desc:     "Subchannel deleted",
 		Severity: channelz.CtInfo,

@@ -1648,6 +1694,29 @@ func (ac *addrConn) tearDown(err error) {
 	// being deleted right away.
 	channelz.RemoveEntry(ac.channelzID)
 	ac.mu.Unlock()
+
+	// We have to release the lock before the call to GracefulClose/Close here
+	// because both of them call onClose(), which requires locking ac.mu.
+	if curTr != nil {
+		if err == errConnDrain {
+			// Close the transport gracefully when the subConn is being shutdown.
+			//
+			// GracefulClose() may be executed multiple times if:
+			// - multiple GoAway frames are received from the server
+			// - there are concurrent name resolver or balancer triggered
+			//   address removal and GoAway
+			curTr.GracefulClose()
+		} else {
+			// Hard close the transport when the channel is entering idle or is
+			// being shutdown. In the case where the channel is being shutdown,
+			// closing of transports is also taken care of by cancelation of cc.ctx.
+			// But in the case where the channel is entering idle, we need to
+			// explicitly close the transports here. Instead of distinguishing
+			// between these two cases, it is simpler to close the transport
+			// unconditionally here.
+			curTr.Close(err)
+		}
+	}
 }

 func (ac *addrConn) getState() connectivity.State {

@@ -1807,19 +1876,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
 }

 // parseTarget uses RFC 3986 semantics to parse the given target into a
-// resolver.Target struct containing scheme, authority and url. Query
-// params are stripped from the endpoint.
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
 func parseTarget(target string) (resolver.Target, error) {
 	u, err := url.Parse(target)
 	if err != nil {
 		return resolver.Target{}, err
 	}

-	return resolver.Target{
-		Scheme:    u.Scheme,
-		Authority: u.Host,
-		URL:       *u,
-	}, nil
+	return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// Return for characters that must be escaped as per
+	// Valid chars are mentioned here:
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanum are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimeters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
 }

 // Determine channel authority. The order of precedence is as follows:

@@ -1872,7 +1992,11 @@ func (cc *ClientConn) determineAuthority() error {
 		// the channel authority given the user's dial target. For resolvers
 		// which don't implement this interface, we will use the endpoint from
 		// "scheme://authority/endpoint" as the default authority.
-		cc.authority = endpoint
+		// Escape the endpoint to handle use cases where the endpoint
+		// might not be a valid authority by default.
+		// For example an endpoint which has multiple paths like
+		// 'a/b/c', which is not a valid authority by default.
+		cc.authority = encodeAuthority(endpoint)
 	}
 	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
 	return nil
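encodeAuthority is unexported inside the vendored package, so it cannot be called from application code; the sketch below simply restates its escaping rule (RFC 3986 authority characters pass through, everything else is percent-encoded) to show what the new cc.authority value looks like for an endpoint such as 'a/b/c'. The function name and inputs here are illustrative only:

package main

import (
	"fmt"
	"strings"
)

// escapeAuthority is a simplified restatement of the rule used above.
func escapeAuthority(s string) string {
	const upperhex = "0123456789ABCDEF"
	allowed := func(c byte) bool {
		switch {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			return true
		case strings.IndexByte("-._~!$&'()*+,;=:[]@", c) >= 0:
			return true
		}
		return false
	}
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		if c := s[i]; allowed(c) {
			b.WriteByte(c)
		} else {
			b.WriteByte('%')
			b.WriteByte(upperhex[c>>4])
			b.WriteByte(upperhex[c&15])
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapeAuthority("a/b/c")) // prints a%2Fb%2Fc
}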
@@ -27,8 +27,8 @@ import (
 // omits the name/string, which vary between the two and are not needed for
 // anything besides the registry in the encoding package.
 type baseCodec interface {
-	Marshal(v interface{}) ([]byte, error)
-	Unmarshal(data []byte, v interface{}) error
+	Marshal(v any) ([]byte, error)
+	Unmarshal(data []byte, v any) error
 }

 var _ baseCodec = Codec(nil)

@@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil)
 // Deprecated: use encoding.Codec instead.
 type Codec interface {
 	// Marshal returns the wire format of v.
-	Marshal(v interface{}) ([]byte, error)
+	Marshal(v any) ([]byte, error)
 	// Unmarshal parses the wire format into v.
-	Unmarshal(data []byte, v interface{}) error
+	Unmarshal(data []byte, v any) error
 	// String returns the name of the Codec implementation. This is unused by
 	// gRPC.
 	String() string

@@ -78,6 +78,7 @@ type dialOptions struct {
 	defaultServiceConfigRawJSON *string
 	resolvers                   []resolver.Builder
 	idleTimeout                 time.Duration
+	recvBufferPool              SharedBufferPool
 }

 // DialOption configures how we set up the connection.

@@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption {
 	return &joinDialOption{opts: opts}
 }

+// WithSharedWriteBuffer allows reusing per-connection transport write buffer.
+// If this option is set to true every connection will release the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithSharedWriteBuffer(val bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.SharedWriteBuffer = val
+	})
+}
+
 // WithWriteBufferSize determines how much data can be batched before doing a
 // write on the wire. The corresponding memory allocation for this buffer will
 // be twice the size to keep syscalls low. The default value for this buffer is

@@ -628,6 +643,7 @@ func defaultDialOptions() dialOptions {
 			ReadBufferSize: defaultReadBufSize,
 			UseProxy:       true,
 		},
+		recvBufferPool: nopBufferPool{},
 	}
 }

@@ -676,3 +692,24 @@ func WithIdleTimeout(d time.Duration) DialOption {
 		o.idleTimeout = d
 	})
 }
+
+// WithRecvBufferPool returns a DialOption that configures the ClientConn
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.recvBufferPool = bufferPool
+	})
+}
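Both dial options added above are marked experimental in this gRPC release. A hedged usage sketch from the client side, with a placeholder target address; grpc.NewSharedBufferPool is the stock pool implementation the WithRecvBufferPool doc comment points to:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithSharedWriteBuffer(true),                    // release write buffers after each flush
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()), // reuse receive buffers across RPCs
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
	_ = cc
}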
@@ -90,9 +90,9 @@ func GetCompressor(name string) Compressor {
 // methods can be called from concurrent goroutines.
 type Codec interface {
 	// Marshal returns the wire format of v.
-	Marshal(v interface{}) ([]byte, error)
+	Marshal(v any) ([]byte, error)
 	// Unmarshal parses the wire format into v.
-	Unmarshal(data []byte, v interface{}) error
+	Unmarshal(data []byte, v any) error
 	// Name returns the name of the Codec implementation. The returned string
 	// will be used as part of content type in transmission. The result must be
 	// static; the result cannot change between calls.

@@ -37,7 +37,7 @@ func init() {
 // codec is a Codec implementation with protobuf. It is the default codec for gRPC.
 type codec struct{}

-func (codec) Marshal(v interface{}) ([]byte, error) {
+func (codec) Marshal(v any) ([]byte, error) {
 	vv, ok := v.(proto.Message)
 	if !ok {
 		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)

@@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) {
 	return proto.Marshal(vv)
 }

-func (codec) Unmarshal(data []byte, v interface{}) error {
+func (codec) Unmarshal(data []byte, v any) error {
 	vv, ok := v.(proto.Message)
 	if !ok {
 		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
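Because any and interface{} are the same type, a third-party codec written against either spelling still satisfies the encoding.Codec interface shown above. A hedged sketch of a JSON codec (package and codec name are arbitrary choices for illustration):

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec serializes messages as JSON instead of protobuf.
type codec struct{}

func (codec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (codec) Name() string                       { return "json" }

func init() {
	// Registered codecs can be selected per call with grpc.CallContentSubtype("json").
	encoding.RegisterCodec(codec{})
}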
@@ -31,71 +31,71 @@ type componentData struct {

 var cache = map[string]*componentData{}

-func (c *componentData) InfoDepth(depth int, args ...interface{}) {
-	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+func (c *componentData) InfoDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
 	grpclog.InfoDepth(depth+1, args...)
 }

-func (c *componentData) WarningDepth(depth int, args ...interface{}) {
-	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+func (c *componentData) WarningDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
 	grpclog.WarningDepth(depth+1, args...)
 }

-func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
-	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+func (c *componentData) ErrorDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
 	grpclog.ErrorDepth(depth+1, args...)
 }

-func (c *componentData) FatalDepth(depth int, args ...interface{}) {
-	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+func (c *componentData) FatalDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
 	grpclog.FatalDepth(depth+1, args...)
 }

-func (c *componentData) Info(args ...interface{}) {
+func (c *componentData) Info(args ...any) {
 	c.InfoDepth(1, args...)
 }

-func (c *componentData) Warning(args ...interface{}) {
+func (c *componentData) Warning(args ...any) {
 	c.WarningDepth(1, args...)
 }

-func (c *componentData) Error(args ...interface{}) {
+func (c *componentData) Error(args ...any) {
 	c.ErrorDepth(1, args...)
 }

-func (c *componentData) Fatal(args ...interface{}) {
+func (c *componentData) Fatal(args ...any) {
 	c.FatalDepth(1, args...)
 }

-func (c *componentData) Infof(format string, args ...interface{}) {
+func (c *componentData) Infof(format string, args ...any) {
 	c.InfoDepth(1, fmt.Sprintf(format, args...))
 }

-func (c *componentData) Warningf(format string, args ...interface{}) {
+func (c *componentData) Warningf(format string, args ...any) {
 	c.WarningDepth(1, fmt.Sprintf(format, args...))
 }

-func (c *componentData) Errorf(format string, args ...interface{}) {
+func (c *componentData) Errorf(format string, args ...any) {
 	c.ErrorDepth(1, fmt.Sprintf(format, args...))
 }

-func (c *componentData) Fatalf(format string, args ...interface{}) {
+func (c *componentData) Fatalf(format string, args ...any) {
 	c.FatalDepth(1, fmt.Sprintf(format, args...))
 }

-func (c *componentData) Infoln(args ...interface{}) {
+func (c *componentData) Infoln(args ...any) {
 	c.InfoDepth(1, args...)
 }

-func (c *componentData) Warningln(args ...interface{}) {
+func (c *componentData) Warningln(args ...any) {
 	c.WarningDepth(1, args...)
 }

-func (c *componentData) Errorln(args ...interface{}) {
+func (c *componentData) Errorln(args ...any) {
 	c.ErrorDepth(1, args...)
 }

-func (c *componentData) Fatalln(args ...interface{}) {
+func (c *componentData) Fatalln(args ...any) {
 	c.FatalDepth(1, args...)
 }

@@ -42,53 +42,53 @@ func V(l int) bool {
 }

 // Info logs to the INFO log.
-func Info(args ...interface{}) {
+func Info(args ...any) {
 	grpclog.Logger.Info(args...)
 }

 // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
-func Infof(format string, args ...interface{}) {
+func Infof(format string, args ...any) {
 	grpclog.Logger.Infof(format, args...)
 }

 // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
-func Infoln(args ...interface{}) {
+func Infoln(args ...any) {
 	grpclog.Logger.Infoln(args...)
 }

 // Warning logs to the WARNING log.
-func Warning(args ...interface{}) {
+func Warning(args ...any) {
 	grpclog.Logger.Warning(args...)
 }

 // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
-func Warningf(format string, args ...interface{}) {
+func Warningf(format string, args ...any) {
 	grpclog.Logger.Warningf(format, args...)
 }

 // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
-func Warningln(args ...interface{}) {
+func Warningln(args ...any) {
 	grpclog.Logger.Warningln(args...)
 }

 // Error logs to the ERROR log.
-func Error(args ...interface{}) {
+func Error(args ...any) {
 	grpclog.Logger.Error(args...)
 }

 // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...interface{}) {
+func Errorf(format string, args ...any) {
 	grpclog.Logger.Errorf(format, args...)
 }

 // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...interface{}) {
+func Errorln(args ...any) {
 	grpclog.Logger.Errorln(args...)
 }

 // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
 // It calls os.Exit() with exit code 1.
-func Fatal(args ...interface{}) {
+func Fatal(args ...any) {
 	grpclog.Logger.Fatal(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)

@@ -96,7 +96,7 @@ func Fatal(args ...interface{}) {

 // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
 // It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
 	grpclog.Logger.Fatalf(format, args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)

@@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) {

 // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
 // It calle os.Exit()) with exit code 1.
-func Fatalln(args ...interface{}) {
+func Fatalln(args ...any) {
 	grpclog.Logger.Fatalln(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)

@@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) {
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
 //
 // Deprecated: use Info.
-func Print(args ...interface{}) {
+func Print(args ...any) {
 	grpclog.Logger.Info(args...)
 }

 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
 //
 // Deprecated: use Infof.
-func Printf(format string, args ...interface{}) {
+func Printf(format string, args ...any) {
 	grpclog.Logger.Infof(format, args...)
 }

 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
 //
 // Deprecated: use Infoln.
-func Println(args ...interface{}) {
+func Println(args ...any) {
 	grpclog.Logger.Infoln(args...)
 }

@@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog"
 //
 // Deprecated: use LoggerV2.
 type Logger interface {
-	Fatal(args ...interface{})
-	Fatalf(format string, args ...interface{})
-	Fatalln(args ...interface{})
-	Print(args ...interface{})
-	Printf(format string, args ...interface{})
-	Println(args ...interface{})
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
 }

 // SetLogger sets the logger that is used in grpc. Call only from

@@ -45,39 +45,39 @@ type loggerWrapper struct {
 	Logger
 }

-func (g *loggerWrapper) Info(args ...interface{}) {
+func (g *loggerWrapper) Info(args ...any) {
 	g.Logger.Print(args...)
 }

-func (g *loggerWrapper) Infoln(args ...interface{}) {
+func (g *loggerWrapper) Infoln(args ...any) {
 	g.Logger.Println(args...)
 }

-func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+func (g *loggerWrapper) Infof(format string, args ...any) {
 	g.Logger.Printf(format, args...)
 }

-func (g *loggerWrapper) Warning(args ...interface{}) {
+func (g *loggerWrapper) Warning(args ...any) {
 	g.Logger.Print(args...)
 }

-func (g *loggerWrapper) Warningln(args ...interface{}) {
+func (g *loggerWrapper) Warningln(args ...any) {
 	g.Logger.Println(args...)
 }

-func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
+func (g *loggerWrapper) Warningf(format string, args ...any) {
 	g.Logger.Printf(format, args...)
 }

-func (g *loggerWrapper) Error(args ...interface{}) {
+func (g *loggerWrapper) Error(args ...any) {
 	g.Logger.Print(args...)
 }

-func (g *loggerWrapper) Errorln(args ...interface{}) {
+func (g *loggerWrapper) Errorln(args ...any) {
 	g.Logger.Println(args...)
 }

-func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
+func (g *loggerWrapper) Errorf(format string, args ...any) {
 	g.Logger.Printf(format, args...)
 }

@@ -33,35 +33,35 @@ import (
 // LoggerV2 does underlying logging work for grpclog.
 type LoggerV2 interface {
 	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
-	Info(args ...interface{})
+	Info(args ...any)
 	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
-	Infoln(args ...interface{})
+	Infoln(args ...any)
 	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
-	Infof(format string, args ...interface{})
+	Infof(format string, args ...any)
 	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
-	Warning(args ...interface{})
+	Warning(args ...any)
 	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
-	Warningln(args ...interface{})
+	Warningln(args ...any)
 	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
-	Warningf(format string, args ...interface{})
+	Warningf(format string, args ...any)
 	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
-	Error(args ...interface{})
+	Error(args ...any)
 	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
-	Errorln(args ...interface{})
+	Errorln(args ...any)
 	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
-	Errorf(format string, args ...interface{})
+	Errorf(format string, args ...any)
 	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
 	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
 	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatal(args ...interface{})
+	Fatal(args ...any)
 	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
 	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
 	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatalln(args ...interface{})
+	Fatalln(args ...any)
 	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
 	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
 	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatalf(format string, args ...interface{})
+	Fatalf(format string, args ...any)
 	// V reports whether verbosity level l is at least the requested verbose level.
 	V(l int) bool
 }

@@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) {
 	g.m[severity].Output(2, string(b))
 }

-func (g *loggerT) Info(args ...interface{}) {
+func (g *loggerT) Info(args ...any) {
 	g.output(infoLog, fmt.Sprint(args...))
 }

-func (g *loggerT) Infoln(args ...interface{}) {
+func (g *loggerT) Infoln(args ...any) {
 	g.output(infoLog, fmt.Sprintln(args...))
 }

-func (g *loggerT) Infof(format string, args ...interface{}) {
+func (g *loggerT) Infof(format string, args ...any) {
 	g.output(infoLog, fmt.Sprintf(format, args...))
 }

-func (g *loggerT) Warning(args ...interface{}) {
+func (g *loggerT) Warning(args ...any) {
 	g.output(warningLog, fmt.Sprint(args...))
 }

-func (g *loggerT) Warningln(args ...interface{}) {
+func (g *loggerT) Warningln(args ...any) {
 	g.output(warningLog, fmt.Sprintln(args...))
 }

-func (g *loggerT) Warningf(format string, args ...interface{}) {
+func (g *loggerT) Warningf(format string, args ...any) {
 	g.output(warningLog, fmt.Sprintf(format, args...))
 }

-func (g *loggerT) Error(args ...interface{}) {
+func (g *loggerT) Error(args ...any) {
 	g.output(errorLog, fmt.Sprint(args...))
 }

-func (g *loggerT) Errorln(args ...interface{}) {
+func (g *loggerT) Errorln(args ...any) {
 	g.output(errorLog, fmt.Sprintln(args...))
 }

-func (g *loggerT) Errorf(format string, args ...interface{}) {
+func (g *loggerT) Errorf(format string, args ...any) {
 	g.output(errorLog, fmt.Sprintf(format, args...))
 }

-func (g *loggerT) Fatal(args ...interface{}) {
+func (g *loggerT) Fatal(args ...any) {
 	g.output(fatalLog, fmt.Sprint(args...))
 	os.Exit(1)
 }

-func (g *loggerT) Fatalln(args ...interface{}) {
+func (g *loggerT) Fatalln(args ...any) {
 	g.output(fatalLog, fmt.Sprintln(args...))
 	os.Exit(1)
 }

-func (g *loggerT) Fatalf(format string, args ...interface{}) {
+func (g *loggerT) Fatalf(format string, args ...any) {
 	g.output(fatalLog, fmt.Sprintf(format, args...))
 	os.Exit(1)
 }

@@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool {
 type DepthLoggerV2 interface {
 	LoggerV2
 	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	InfoDepth(depth int, args ...interface{})
+	InfoDepth(depth int, args ...any)
 	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	WarningDepth(depth int, args ...interface{})
+	WarningDepth(depth int, args ...any)
 	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	ErrorDepth(depth int, args ...interface{})
+	ErrorDepth(depth int, args ...any)
 	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	FatalDepth(depth int, args ...interface{})
+	FatalDepth(depth int, args ...any)
 }
|
|
@@ -23,7 +23,7 @@ import (
 )
 
 // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
-type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
+type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
 
 // UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
 // Unary interceptors can be specified as a DialOption, using
@@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{
 // defaults from the ClientConn as well as per-call options.
 //
 // The returned error must be compatible with the status package.
-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
 
 // Streamer is called by StreamClientInterceptor to create a ClientStream.
 type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
@@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli
 // server side. All per-rpc information may be mutated by the interceptor.
 type UnaryServerInfo struct {
 	// Server is the service implementation the user provides. This is read-only.
-	Server interface{}
+	Server any
 	// FullMethod is the full RPC method string, i.e., /package.service/method.
 	FullMethod string
 }
@@ -78,13 +78,13 @@ type UnaryServerInfo struct {
 // status package, or be one of the context errors. Otherwise, gRPC will use
 // codes.Unknown as the status code and err.Error() as the status message of the
 // RPC.
-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+type UnaryHandler func(ctx context.Context, req any) (any, error)
 
 // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
 // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
 // of the service method implementation. It is the responsibility of the interceptor to invoke handler
 // to complete the RPC.
-type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
 
 // StreamServerInfo consists of various information about a streaming RPC on
 // server side. All per-rpc information may be mutated by the interceptor.
@@ -101,4 +101,4 @@ type StreamServerInfo struct {
 // info contains all the information of this RPC the interceptor can operate on. And handler is the
 // service method implementation. It is the responsibility of the interceptor to invoke handler to
 // complete the RPC.
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
+type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
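The hunks above migrate the interceptor type aliases from interface{} to any. As a quick orientation, here is a minimal sketch of a unary server interceptor written against the updated signatures; the function name, log fields, and server wiring are illustrative assumptions, not part of the vendored change.

package example

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
)

// loggingUnaryInterceptor is a hypothetical example interceptor.
func loggingUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
    start := time.Now()
    // The interceptor is responsible for invoking handler to complete the RPC.
    resp, err := handler(ctx, req)
    log.Printf("method=%s duration=%s err=%v", info.FullMethod, time.Since(start), err)
    return resp, err
}

// newServer installs the interceptor on a server.
func newServer() *grpc.Server {
    return grpc.NewServer(grpc.UnaryInterceptor(loggingUnaryInterceptor))
}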
59 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go generated vendored
@@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() {
 	}
 }
 
-// UpdateSubConnState forwards the update to the appropriate child.
-func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+// updateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
 	gsb.currentMu.Lock()
 	defer gsb.currentMu.Unlock()
 	gsb.mu.Lock()
@@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC
 	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
 		balToUpdate = gsb.balancerPending
 	}
-	gsb.mu.Unlock()
 	if balToUpdate == nil {
 		// SubConn belonged to a stale lb policy that has not yet fully closed,
 		// or the balancer was already closed.
+		gsb.mu.Unlock()
 		return
 	}
-	balToUpdate.UpdateSubConnState(sc, state)
+	if state.ConnectivityState == connectivity.Shutdown {
+		delete(balToUpdate.subconns, sc)
+	}
+	gsb.mu.Unlock()
+	if cb != nil {
+		cb(state)
+	} else {
+		balToUpdate.UpdateSubConnState(sc, state)
+	}
+}
+
+// UpdateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	gsb.updateSubConnState(sc, state, nil)
 }
 
 // Close closes any active child balancers.
@@ -242,7 +255,7 @@ func (gsb *Balancer) Close() {
 //
 // It implements the balancer.ClientConn interface and is passed down in that
 // capacity to the wrapped balancer. It maintains a set of subConns created by
-// the wrapped balancer and calls from the latter to create/update/remove
+// the wrapped balancer and calls from the latter to create/update/shutdown
 // SubConns update this set before being forwarded to the parent ClientConn.
 // State updates from the wrapped balancer can result in invocation of the
 // graceful switch logic.
@@ -254,21 +267,10 @@ type balancerWrapper struct {
 	subconns map[balancer.SubConn]bool // subconns created by this balancer
 }
 
-func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
-	if state.ConnectivityState == connectivity.Shutdown {
-		bw.gsb.mu.Lock()
-		delete(bw.subconns, sc)
-		bw.gsb.mu.Unlock()
-	}
-	// There is no need to protect this read with a mutex, as the write to the
-	// Balancer field happens in SwitchTo, which completes before this can be
-	// called.
-	bw.Balancer.UpdateSubConnState(sc, state)
-}
-
-// Close closes the underlying LB policy and removes the subconns it created. bw
-// must not be referenced via balancerCurrent or balancerPending in gsb when
-// called. gsb.mu must not be held. Does not panic with a nil receiver.
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held. Does not panic with a nil
+// receiver.
 func (bw *balancerWrapper) Close() {
 	// before Close is called.
 	if bw == nil {
@@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() {
 	bw.Balancer.Close()
 	bw.gsb.mu.Lock()
 	for sc := range bw.subconns {
-		bw.gsb.cc.RemoveSubConn(sc)
+		sc.Shutdown()
 	}
 	bw.gsb.mu.Unlock()
 }
@@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne
 	}
 	bw.gsb.mu.Unlock()
 
+	var sc balancer.SubConn
+	oldListener := opts.StateListener
+	opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
 	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
 	if err != nil {
 		return nil, err
 	}
 	bw.gsb.mu.Lock()
 	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
-		bw.gsb.cc.RemoveSubConn(sc)
+		sc.Shutdown()
 		bw.gsb.mu.Unlock()
 		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
 	}
@@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
 }
 
 func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	bw.gsb.mu.Lock()
-	if !bw.gsb.balancerCurrentOrPending(bw) {
-		bw.gsb.mu.Unlock()
-		return
-	}
-	bw.gsb.mu.Unlock()
-	bw.gsb.cc.RemoveSubConn(sc)
+	// Note: existing third party balancers may call this, so it must remain
+	// until RemoveSubConn is fully removed.
+	sc.Shutdown()
 }
 
 func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
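For context on the StateListener plumbing added above: child balancers now receive subconn state updates through the listener they set in NewSubConnOptions, which the graceful-switch wrapper intercepts and forwards, instead of through UpdateSubConnState. A hedged sketch of that pattern from a hypothetical child policy (not taken from this diff):

package example

import (
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/resolver"
)

// newSubConn is an illustrative helper a child LB policy might use.
func newSubConn(cc balancer.ClientConn, addrs []resolver.Address) (balancer.SubConn, error) {
    sc, err := cc.NewSubConn(addrs, balancer.NewSubConnOptions{
        // Replaces the old UpdateSubConnState callback path.
        StateListener: func(scs balancer.SubConnState) {
            // React to connectivity transitions (READY, IDLE, TRANSIENT_FAILURE, SHUTDOWN) here.
        },
    })
    if err != nil {
        return nil, err
    }
    sc.Connect()
    return sc, nil
}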
@@ -25,7 +25,7 @@ import (
 // Parser converts loads from metadata into a concrete type.
 type Parser interface {
 	// Parse parses loads from metadata.
-	Parse(md metadata.MD) interface{}
+	Parse(md metadata.MD) any
 }
 
 var parser Parser
@@ -38,7 +38,7 @@ func SetParser(lr Parser) {
 }
 
 // Parse calls parser.Read().
-func Parse(md metadata.MD) interface{} {
+func Parse(md metadata.MD) any {
 	if parser == nil {
 		return nil
 	}
@@ -230,7 +230,7 @@ type ClientMessage struct {
 	OnClientSide bool
 	// Message can be a proto.Message or []byte. Other messages formats are not
 	// supported.
-	Message interface{}
+	Message any
 }
 
 func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
@@ -270,7 +270,7 @@ type ServerMessage struct {
 	OnClientSide bool
 	// Message can be a proto.Message or []byte. Other messages formats are not
 	// supported.
-	Message interface{}
+	Message any
 }
 
 func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
@@ -28,25 +28,25 @@ import "sync"
 // the underlying mutex used for synchronization.
 //
 // Unbounded supports values of any type to be stored in it by using a channel
-// of `interface{}`. This means that a call to Put() incurs an extra memory
-// allocation, and also that users need a type assertion while reading. For
-// performance critical code paths, using Unbounded is strongly discouraged and
-// defining a new type specific implementation of this buffer is preferred. See
+// of `any`. This means that a call to Put() incurs an extra memory allocation,
+// and also that users need a type assertion while reading. For performance
+// critical code paths, using Unbounded is strongly discouraged and defining a
+// new type specific implementation of this buffer is preferred. See
 // internal/transport/transport.go for an example of this.
 type Unbounded struct {
-	c       chan interface{}
+	c       chan any
 	closed  bool
 	mu      sync.Mutex
-	backlog []interface{}
+	backlog []any
 }
 
 // NewUnbounded returns a new instance of Unbounded.
 func NewUnbounded() *Unbounded {
-	return &Unbounded{c: make(chan interface{}, 1)}
+	return &Unbounded{c: make(chan any, 1)}
 }
 
 // Put adds t to the unbounded buffer.
-func (b *Unbounded) Put(t interface{}) {
+func (b *Unbounded) Put(t any) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	if b.closed {
@@ -89,7 +89,7 @@ func (b *Unbounded) Load() {
 //
 // If the unbounded buffer is closed, the read channel returned by this method
 // is closed.
-func (b *Unbounded) Get() <-chan interface{} {
+func (b *Unbounded) Get() <-chan any {
 	return b.c
 }
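The updated comment above still steers performance-critical callers toward a type-specific buffer. A hypothetical generics-based variant, shown only to illustrate that suggestion (names and package are invented, not part of the vendored code):

package example

import "sync"

// unbounded mirrors the Put/Load/Get protocol of the Unbounded buffer above,
// but with a type parameter so readers need no type assertion.
type unbounded[T any] struct {
    c       chan T
    mu      sync.Mutex
    backlog []T
}

func newUnbounded[T any]() *unbounded[T] {
    return &unbounded[T]{c: make(chan T, 1)}
}

// put enqueues v, spilling to the backlog when the channel already holds a value.
func (b *unbounded[T]) put(v T) {
    b.mu.Lock()
    defer b.mu.Unlock()
    if len(b.backlog) == 0 {
        select {
        case b.c <- v:
            return
        default:
        }
    }
    b.backlog = append(b.backlog, v)
}

// load moves the next backlogged value into the channel; callers invoke it after each read.
func (b *unbounded[T]) load() {
    b.mu.Lock()
    defer b.mu.Unlock()
    if len(b.backlog) > 0 {
        select {
        case b.c <- b.backlog[0]:
            b.backlog = b.backlog[1:]
        default:
        }
    }
}

// get returns the receive channel.
func (b *unbounded[T]) get() <-chan T { return b.c }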
@@ -24,9 +24,7 @@
 package channelz
 
 import (
-	"context"
 	"errors"
-	"fmt"
 	"sort"
 	"sync"
 	"sync/atomic"
@@ -40,8 +38,11 @@ const (
 )
 
 var (
-	db    dbWrapper
-	idGen idGenerator
+	// IDGen is the global channelz entity ID generator. It should not be used
+	// outside this package except by tests.
+	IDGen IDGenerator
+
+	db dbWrapper
 	// EntryPerPage defines the number of channelz entries to be shown on a web page.
 	EntryPerPage = int64(50)
 	curState int32
@@ -52,14 +53,14 @@ var (
 func TurnOn() {
 	if !IsOn() {
 		db.set(newChannelMap())
-		idGen.reset()
+		IDGen.Reset()
 		atomic.StoreInt32(&curState, 1)
 	}
 }
 
 // IsOn returns whether channelz data collection is on.
 func IsOn() bool {
-	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+	return atomic.LoadInt32(&curState) == 1
 }
 
 // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
@@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap {
 	return d.DB
 }
 
-// NewChannelzStorageForTesting initializes channelz data storage and id
-// generator for testing purposes.
-//
-// Returns a cleanup function to be invoked by the test, which waits for up to
-// 10s for all channelz state to be reset by the grpc goroutines when those
-// entities get closed. This cleanup function helps with ensuring that tests
-// don't mess up each other.
-func NewChannelzStorageForTesting() (cleanup func() error) {
-	db.set(newChannelMap())
-	idGen.reset()
-
-	return func() error {
-		cm := db.get()
-		if cm == nil {
-			return nil
-		}
-
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		defer cancel()
-		ticker := time.NewTicker(10 * time.Millisecond)
-		defer ticker.Stop()
-		for {
-			cm.mu.RLock()
-			topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
-			cm.mu.RUnlock()
-
-			if err := ctx.Err(); err != nil {
-				return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
-			}
-			if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
-				return nil
-			}
-			<-ticker.C
-		}
-	}
-}
-
 // GetTopChannels returns a slice of top channel's ChannelMetric, along with a
 // boolean indicating whether there's more top channels to be queried for.
 //
@@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric {
 //
 // If channelz is not turned ON, the channelz database is not mutated.
 func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
-	id := idGen.genID()
+	id := IDGen.genID()
 	var parent int64
 	isTopChannel := true
 	if pid != nil {
@@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er
 	if pid == nil {
 		return nil, errors.New("a SubChannel's parent id cannot be nil")
 	}
-	id := idGen.genID()
+	id := IDGen.genID()
 	if !IsOn() {
 		return newIdentifer(RefSubChannel, id, pid), nil
 	}
@@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er
 //
 // If channelz is not turned ON, the channelz database is not mutated.
 func RegisterServer(s Server, ref string) *Identifier {
-	id := idGen.genID()
+	id := IDGen.genID()
 	if !IsOn() {
 		return newIdentifer(RefServer, id, nil)
 	}
@@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e
 	if pid == nil {
 		return nil, errors.New("a ListenSocket's parent id cannot be 0")
 	}
-	id := idGen.genID()
+	id := IDGen.genID()
 	if !IsOn() {
 		return newIdentifer(RefListenSocket, id, pid), nil
 	}
@@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, e
 	if pid == nil {
 		return nil, errors.New("a NormalSocket's parent id cannot be 0")
 	}
-	id := idGen.genID()
+	id := IDGen.genID()
 	if !IsOn() {
 		return newIdentifer(RefNormalSocket, id, pid), nil
 	}
@@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric {
 	return sm
 }
 
-type idGenerator struct {
+// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
+type IDGenerator struct {
 	id int64
 }
 
-func (i *idGenerator) reset() {
+// Reset resets the generated ID back to zero. Should only be used at
+// initialization or by tests sensitive to the ID number.
+func (i *IDGenerator) Reset() {
 	atomic.StoreInt64(&i.id, 0)
 }
 
-func (i *idGenerator) genID() int64 {
+func (i *IDGenerator) genID() int64 {
 	return atomic.AddInt64(&i.id, 1)
 }
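IDGen is exported above so that tests which depend on channelz entity IDs can reset the counter between cases. A hypothetical test snippet showing that intent (only compilable inside the grpc module, since the package is internal; the test name is invented):

package channelz_test

import (
    "testing"

    "google.golang.org/grpc/internal/channelz"
)

func TestEntityIDsStartFromOne(t *testing.T) {
    channelz.IDGen.Reset() // deterministic IDs for the assertions that follow
    // ... register channels/servers here and assert on their IDs ...
}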
@@ -31,7 +31,7 @@ func withParens(id *Identifier) string {
 }
 
 // Info logs and adds a trace event if channelz is on.
-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
 	AddTraceEvent(l, id, 1, &TraceEventDesc{
 		Desc: fmt.Sprint(args...),
 		Severity: CtInfo,
@@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
 }
 
 // Infof logs and adds a trace event if channelz is on.
-func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
 	AddTraceEvent(l, id, 1, &TraceEventDesc{
 		Desc: fmt.Sprintf(format, args...),
 		Severity: CtInfo,
@@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter
 }
 
 // Warning logs and adds a trace event if channelz is on.
-func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
 	AddTraceEvent(l, id, 1, &TraceEventDesc{
 		Desc: fmt.Sprint(args...),
 		Severity: CtWarning,
@@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
 }
 
 // Warningf logs and adds a trace event if channelz is on.
-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
 	AddTraceEvent(l, id, 1, &TraceEventDesc{
 		Desc: fmt.Sprintf(format, args...),
 		Severity: CtWarning,
@@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in
 }
 
 // Error logs and adds a trace event if channelz is on.
-func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
 	AddTraceEvent(l, id, 1, &TraceEventDesc{
 		Desc: fmt.Sprint(args...),
 		Severity: CtError,
@@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
 }
 
 // Errorf logs and adds a trace event if channelz is on.
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
 	AddTraceEvent(l, id, 1, &TraceEventDesc{
 		Desc: fmt.Sprintf(format, args...),
 		Severity: CtError,
@@ -628,6 +628,7 @@ type tracedChannel interface {
 
 type channelTrace struct {
 	cm *channelMap
+	clearCalled bool
 	createdTime time.Time
 	eventCount int64
 	mu sync.Mutex
@@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) {
 }
 
 func (c *channelTrace) clear() {
+	if c.clearCalled {
+		return
+	}
+	c.clearCalled = true
 	c.mu.Lock()
 	for _, e := range c.events {
 		if e.RefID != 0 {
@@ -23,7 +23,7 @@ import (
 )
 
 // GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(socket interface{}) *SocketOptionData {
+func GetSocketOption(socket any) *SocketOptionData {
 	c, ok := socket.(syscall.Conn)
 	if !ok {
 		return nil
@@ -22,6 +22,6 @@
 package channelz
 
 // GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c interface{}) *SocketOptionData {
+func GetSocketOption(c any) *SocketOptionData {
 	return nil
 }
@@ -25,12 +25,12 @@ import (
 type requestInfoKey struct{}
 
 // NewRequestInfoContext creates a context with ri.
-func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
+func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
 	return context.WithValue(ctx, requestInfoKey{}, ri)
 }
 
 // RequestInfoFromContext extracts the RequestInfo from ctx.
-func RequestInfoFromContext(ctx context.Context) interface{} {
+func RequestInfoFromContext(ctx context.Context) any {
 	return ctx.Value(requestInfoKey{})
 }
 
@@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} {
 type clientHandshakeInfoKey struct{}
 
 // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
-func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
+func ClientHandshakeInfoFromContext(ctx context.Context) any {
 	return ctx.Value(clientHandshakeInfoKey{})
 }
 
 // NewClientHandshakeInfoContext creates a context with chi.
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
+func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
 	return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
 }
@@ -37,9 +37,15 @@ var (
 	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
 	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
 	// PickFirstLBConfig is set if we should support configuration of the
-	// pick_first LB policy, which can be enabled by setting the environment
-	// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
-	PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
+	// pick_first LB policy.
+	PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
+	// LeastRequestLB is set if we should support the least_request_experimental
+	// LB policy, which can be enabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
+	LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false)
+	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
+	// handshakes that can be performed.
+	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
 )
 
 func boolFromEnv(envVar string, def bool) bool {
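A hedged sketch of how one of the boolean knobs above is typically consumed at startup; strconv.ParseBool is used here as an approximation of boolFromEnv, whose exact string handling may differ, and the function name is invented:

package example

import (
    "os"
    "strconv"
)

// leastRequestEnabled reads GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST, falling
// back to the default when the variable is unset or unparsable.
func leastRequestEnabled() bool {
    const def = false
    v, err := strconv.ParseBool(os.Getenv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST"))
    if err != nil {
        return def
    }
    return v
}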
@@ -30,7 +30,7 @@ var Logger LoggerV2
 var DepthLogger DepthLoggerV2
 
 // InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...interface{}) {
+func InfoDepth(depth int, args ...any) {
 	if DepthLogger != nil {
 		DepthLogger.InfoDepth(depth, args...)
 	} else {
@@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) {
 }
 
 // WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...interface{}) {
+func WarningDepth(depth int, args ...any) {
 	if DepthLogger != nil {
 		DepthLogger.WarningDepth(depth, args...)
 	} else {
@@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) {
 }
 
 // ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...interface{}) {
+func ErrorDepth(depth int, args ...any) {
 	if DepthLogger != nil {
 		DepthLogger.ErrorDepth(depth, args...)
 	} else {
@@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) {
 }
 
 // FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...interface{}) {
+func FatalDepth(depth int, args ...any) {
 	if DepthLogger != nil {
 		DepthLogger.FatalDepth(depth, args...)
 	} else {
@@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) {
 // is defined here to avoid a circular dependency.
 type LoggerV2 interface {
 	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
-	Info(args ...interface{})
+	Info(args ...any)
 	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
-	Infoln(args ...interface{})
+	Infoln(args ...any)
 	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
-	Infof(format string, args ...interface{})
+	Infof(format string, args ...any)
 	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
-	Warning(args ...interface{})
+	Warning(args ...any)
 	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
-	Warningln(args ...interface{})
+	Warningln(args ...any)
 	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
-	Warningf(format string, args ...interface{})
+	Warningf(format string, args ...any)
 	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
-	Error(args ...interface{})
+	Error(args ...any)
 	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
-	Errorln(args ...interface{})
+	Errorln(args ...any)
 	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
-	Errorf(format string, args ...interface{})
+	Errorf(format string, args ...any)
 	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
 	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
 	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatal(args ...interface{})
+	Fatal(args ...any)
 	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
 	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
 	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatalln(args ...interface{})
+	Fatalln(args ...any)
 	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
 	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
 	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatalf(format string, args ...interface{})
+	Fatalf(format string, args ...any)
 	// V reports whether verbosity level l is at least the requested verbose level.
 	V(l int) bool
 }
@@ -116,11 +116,11 @@ type LoggerV2 interface {
 // later release.
 type DepthLoggerV2 interface {
 	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	InfoDepth(depth int, args ...interface{})
+	InfoDepth(depth int, args ...any)
 	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	WarningDepth(depth int, args ...interface{})
+	WarningDepth(depth int, args ...any)
 	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	ErrorDepth(depth int, args ...interface{})
+	ErrorDepth(depth int, args ...any)
 	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
-	FatalDepth(depth int, args ...interface{})
+	FatalDepth(depth int, args ...any)
 }
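Applications normally reach these interfaces through the public grpclog package rather than implementing LoggerV2 by hand. An illustrative wiring example, assuming the public grpclog.NewLoggerV2 and grpclog.SetLoggerV2 helpers (the wiring itself is not part of this diff):

package example

import (
    "os"

    "google.golang.org/grpc/grpclog"
)

func init() {
    // Route INFO and WARNING to stdout, ERROR (and FATAL) to stderr.
    grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stdout, os.Stderr))
}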
@@ -31,7 +31,7 @@ type PrefixLogger struct {
 }
 
 // Infof does info logging.
-func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
+func (pl *PrefixLogger) Infof(format string, args ...any) {
 	if pl != nil {
 		// Handle nil, so the tests can pass in a nil logger.
 		format = pl.prefix + format
@@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
 }
 
 // Warningf does warning logging.
-func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
+func (pl *PrefixLogger) Warningf(format string, args ...any) {
 	if pl != nil {
 		format = pl.prefix + format
 		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
@@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
 }
 
 // Errorf does error logging.
-func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
+func (pl *PrefixLogger) Errorf(format string, args ...any) {
 	if pl != nil {
 		format = pl.prefix + format
 		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
@@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
 }
 
 // Debugf does info logging at verbose level 2.
-func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
+func (pl *PrefixLogger) Debugf(format string, args ...any) {
 	// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
 	// rewrite PrefixLogger a little to ensure that we don't use the global
 	// `Logger` here, and instead use the `logger` field.
@@ -80,6 +80,13 @@ func Uint32() uint32 {
 	return r.Uint32()
 }
 
+// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
+func ExpFloat64() float64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.ExpFloat64()
+}
+
 // Shuffle implements rand.Shuffle on the grpcrand global source.
 var Shuffle = func(n int, f func(int, int)) {
 	mu.Lock()
@@ -32,10 +32,10 @@ import (
 //
 // This type is safe for concurrent access.
 type CallbackSerializer struct {
-	// Done is closed once the serializer is shut down completely, i.e all
+	// done is closed once the serializer is shut down completely, i.e all
 	// scheduled callbacks are executed and the serializer has deallocated all
 	// its resources.
-	Done chan struct{}
+	done chan struct{}
 
 	callbacks *buffer.Unbounded
 	closedMu sync.Mutex
@@ -48,12 +48,12 @@ type CallbackSerializer struct {
 // callbacks will be added once this context is canceled, and any pending un-run
 // callbacks will be executed before the serializer is shut down.
 func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
-	t := &CallbackSerializer{
-		Done:      make(chan struct{}),
+	cs := &CallbackSerializer{
+		done:      make(chan struct{}),
 		callbacks: buffer.NewUnbounded(),
 	}
-	go t.run(ctx)
-	return t
+	go cs.run(ctx)
+	return cs
 }
 
 // Schedule adds a callback to be scheduled after existing callbacks are run.
@@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
 // Return value indicates if the callback was successfully added to the list of
 // callbacks to be executed by the serializer. It is not possible to add
 // callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
-	t.closedMu.Lock()
-	defer t.closedMu.Unlock()
+func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
+	cs.closedMu.Lock()
+	defer cs.closedMu.Unlock()
 
-	if t.closed {
+	if cs.closed {
 		return false
 	}
-	t.callbacks.Put(f)
+	cs.callbacks.Put(f)
 	return true
 }
 
-func (t *CallbackSerializer) run(ctx context.Context) {
+func (cs *CallbackSerializer) run(ctx context.Context) {
 	var backlog []func(context.Context)
 
-	defer close(t.Done)
+	defer close(cs.done)
 	for ctx.Err() == nil {
 		select {
 		case <-ctx.Done():
 			// Do nothing here. Next iteration of the for loop will not happen,
 			// since ctx.Err() would be non-nil.
-		case callback, ok := <-t.callbacks.Get():
+		case callback, ok := <-cs.callbacks.Get():
 			if !ok {
 				return
 			}
-			t.callbacks.Load()
+			cs.callbacks.Load()
 			callback.(func(ctx context.Context))(ctx)
 		}
 	}
 
 	// Fetch pending callbacks if any, and execute them before returning from
-	// this method and closing t.Done.
-	t.closedMu.Lock()
-	t.closed = true
-	backlog = t.fetchPendingCallbacks()
-	t.callbacks.Close()
-	t.closedMu.Unlock()
+	// this method and closing cs.done.
+	cs.closedMu.Lock()
+	cs.closed = true
+	backlog = cs.fetchPendingCallbacks()
+	cs.callbacks.Close()
+	cs.closedMu.Unlock()
 	for _, b := range backlog {
 		b(ctx)
 	}
 }
 
-func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
+func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
 	var backlog []func(context.Context)
 	for {
 		select {
-		case b := <-t.callbacks.Get():
+		case b := <-cs.callbacks.Get():
 			backlog = append(backlog, b.(func(context.Context)))
-			t.callbacks.Load()
+			cs.callbacks.Load()
 		default:
 			return backlog
 		}
 	}
 }
 
+// Done returns a channel that is closed after the context passed to
+// NewCallbackSerializer is canceled and all callbacks have been executed.
+func (cs *CallbackSerializer) Done() <-chan struct{} {
+	return cs.done
+}
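The hunk above unexports the Done field and adds a Done() accessor. An illustrative caller, only compilable inside the grpc module because grpcsync is an internal package; the function name is invented:

package example

import (
    "context"

    "google.golang.org/grpc/internal/grpcsync"
)

func drain() {
    ctx, cancel := context.WithCancel(context.Background())
    cs := grpcsync.NewCallbackSerializer(ctx)

    cs.Schedule(func(ctx context.Context) {
        // Runs serially with respect to every other scheduled callback.
    })

    cancel()    // stop accepting new callbacks
    <-cs.Done() // wait until previously scheduled callbacks have finished
}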
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+	"sync"
+)
+
+// Subscriber represents an entity that is subscribed to messages published on
+// a PubSub. It wraps the callback to be invoked by the PubSub when a new
+// message is published.
+type Subscriber interface {
+	// OnMessage is invoked when a new message is published. Implementations
+	// must not block in this method.
+	OnMessage(msg any)
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+//
+// Once a PubSub is stopped, no more messages can be published, but any pending
+// published messages will be delivered to the subscribers. Done may be used
+// to determine when all published messages have been delivered.
+type PubSub struct {
+	cs *CallbackSerializer
+
+	// Access to the below fields are guarded by this mutex.
+	mu          sync.Mutex
+	msg         any
+	subscribers map[Subscriber]bool
+}
+
+// NewPubSub returns a new PubSub instance. Users should cancel the
+// provided context to shutdown the PubSub.
+func NewPubSub(ctx context.Context) *PubSub {
+	return &PubSub{
+		cs:          NewCallbackSerializer(ctx),
+		subscribers: map[Subscriber]bool{},
+	}
+}
+
+// Subscribe registers the provided Subscriber to the PubSub.
+//
+// If the PubSub contains a previously published message, the Subscriber's
+// OnMessage() callback will be invoked asynchronously with the existing
+// message to begin with, and subsequently for every newly published message.
+//
+// The caller is responsible for invoking the returned cancel function to
+// unsubscribe itself from the PubSub.
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	ps.subscribers[sub] = true
+
+	if ps.msg != nil {
+		msg := ps.msg
+		ps.cs.Schedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[sub] {
+				return
+			}
+			sub.OnMessage(msg)
+		})
+	}
+
+	return func() {
+		ps.mu.Lock()
+		defer ps.mu.Unlock()
+		delete(ps.subscribers, sub)
+	}
+}
+
+// Publish publishes the provided message to the PubSub, and invokes
+// callbacks registered by subscribers asynchronously.
+func (ps *PubSub) Publish(msg any) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	ps.msg = msg
+	for sub := range ps.subscribers {
+		s := sub
+		ps.cs.Schedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[s] {
+				return
+			}
+			s.OnMessage(msg)
+		})
+	}
+}
+
+// Done returns a channel that is closed after the context passed to NewPubSub
+// is canceled and all updates have been sent to subscribers.
+func (ps *PubSub) Done() <-chan struct{} {
+	return ps.cs.Done()
+}
188 vendor/google.golang.org/grpc/idle.go → vendor/google.golang.org/grpc/internal/idle/idle.go generated vendored
@@ -16,7 +16,9 @@
  *
  */
 
-package grpc
+// Package idle contains a component for managing idleness (entering and exiting)
+// based on RPC activity.
+package idle
 
 import (
 	"fmt"
@@ -24,6 +26,8 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
+
+	"google.golang.org/grpc/grpclog"
 )
 
 // For overriding in unit tests.
@@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
 	return time.AfterFunc(d, f)
 }
 
-// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter
+// Enforcer is the functionality provided by grpc.ClientConn to enter
 // and exit from idle mode.
-type idlenessEnforcer interface {
-	exitIdleMode() error
-	enterIdleMode() error
+type Enforcer interface {
+	ExitIdleMode() error
+	EnterIdleMode() error
 }
 
-// idlenessManager defines the functionality required to track RPC activity on a
+// Manager defines the functionality required to track RPC activity on a
 // channel.
-type idlenessManager interface {
-	onCallBegin() error
-	onCallEnd()
-	close()
+type Manager interface {
+	OnCallBegin() error
+	OnCallEnd()
+	Close()
 }
 
-type noopIdlenessManager struct{}
+type noopManager struct{}
 
-func (noopIdlenessManager) onCallBegin() error { return nil }
-func (noopIdlenessManager) onCallEnd()         {}
-func (noopIdlenessManager) close()             {}
+func (noopManager) OnCallBegin() error { return nil }
+func (noopManager) OnCallEnd()         {}
+func (noopManager) Close()             {}
 
-// idlenessManagerImpl implements the idlenessManager interface. It uses atomic
-// operations to synchronize access to shared state and a mutex to guarantee
-// mutual exclusion in a critical section.
-type idlenessManagerImpl struct {
+// manager implements the Manager interface. It uses atomic operations to
+// synchronize access to shared state and a mutex to guarantee mutual exclusion
+// in a critical section.
+type manager struct {
 	// State accessed atomically.
 	lastCallEndTime  int64 // Unix timestamp in nanos; time when the most recent RPC completed.
 	activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
|
@ -64,14 +68,15 @@ type idlenessManagerImpl struct {
|
||||||
|
|
||||||
// Can be accessed without atomics or mutex since these are set at creation
|
// Can be accessed without atomics or mutex since these are set at creation
|
||||||
// time and read-only after that.
|
// time and read-only after that.
|
||||||
enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn.
|
enforcer Enforcer // Functionality provided by grpc.ClientConn.
|
||||||
timeout int64 // Idle timeout duration nanos stored as an int64.
|
timeout int64 // Idle timeout duration nanos stored as an int64.
|
||||||
|
logger grpclog.LoggerV2
|
||||||
|
|
||||||
// idleMu is used to guarantee mutual exclusion in two scenarios:
|
// idleMu is used to guarantee mutual exclusion in two scenarios:
|
||||||
// - Opposing intentions:
|
// - Opposing intentions:
|
||||||
// - a: Idle timeout has fired and handleIdleTimeout() is trying to put
|
// - a: Idle timeout has fired and handleIdleTimeout() is trying to put
|
||||||
// the channel in idle mode because the channel has been inactive.
|
// the channel in idle mode because the channel has been inactive.
|
||||||
// - b: At the same time an RPC is made on the channel, and onCallBegin()
|
// - b: At the same time an RPC is made on the channel, and OnCallBegin()
|
||||||
// is trying to prevent the channel from going idle.
|
// is trying to prevent the channel from going idle.
|
||||||
// - Competing intentions:
|
// - Competing intentions:
|
||||||
// - The channel is in idle mode and there are multiple RPCs starting at
|
// - The channel is in idle mode and there are multiple RPCs starting at
|
||||||
|
@ -83,28 +88,37 @@ type idlenessManagerImpl struct {
|
||||||
timer *time.Timer
|
timer *time.Timer
|
||||||
}
|
}
|
||||||
|
|
||||||
// newIdlenessManager creates a new idleness manager implementation for the
|
// ManagerOptions is a collection of options used by
|
||||||
|
// NewManager.
|
||||||
|
type ManagerOptions struct {
|
||||||
|
Enforcer Enforcer
|
||||||
|
Timeout time.Duration
|
||||||
|
Logger grpclog.LoggerV2
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewManager creates a new idleness manager implementation for the
|
||||||
// given idle timeout.
|
// given idle timeout.
|
||||||
func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager {
|
func NewManager(opts ManagerOptions) Manager {
|
||||||
if idleTimeout == 0 {
|
if opts.Timeout == 0 {
|
||||||
return noopIdlenessManager{}
|
return noopManager{}
|
||||||
}
|
}
|
||||||
|
|
||||||
i := &idlenessManagerImpl{
|
m := &manager{
|
||||||
enforcer: enforcer,
|
enforcer: opts.Enforcer,
|
||||||
timeout: int64(idleTimeout),
|
timeout: int64(opts.Timeout),
|
||||||
|
logger: opts.Logger,
|
||||||
}
|
}
|
||||||
i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout)
|
m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
|
||||||
return i
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
// resetIdleTimer resets the idle timer to the given duration. This method
|
// resetIdleTimer resets the idle timer to the given duration. This method
|
||||||
// should only be called from the timer callback.
|
// should only be called from the timer callback.
|
||||||
func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
|
func (m *manager) resetIdleTimer(d time.Duration) {
|
||||||
i.idleMu.Lock()
|
m.idleMu.Lock()
|
||||||
defer i.idleMu.Unlock()
|
defer m.idleMu.Unlock()
|
||||||
|
|
||||||
if i.timer == nil {
|
if m.timer == nil {
|
||||||
// Only close sets timer to nil. We are done.
|
// Only close sets timer to nil. We are done.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
|
||||||
// It is safe to ignore the return value from Reset() because this method is
|
// It is safe to ignore the return value from Reset() because this method is
|
||||||
// only ever called from the timer callback, which means the timer has
|
// only ever called from the timer callback, which means the timer has
|
||||||
// already fired.
|
// already fired.
|
||||||
i.timer.Reset(d)
|
m.timer.Reset(d)
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
|
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
|
||||||
// configured idle timeout. The channel is considered inactive if there are no
|
// configured idle timeout. The channel is considered inactive if there are no
|
||||||
// ongoing calls and no RPC activity since the last time the timer fired.
|
// ongoing calls and no RPC activity since the last time the timer fired.
|
||||||
func (i *idlenessManagerImpl) handleIdleTimeout() {
|
func (m *manager) handleIdleTimeout() {
|
||||||
if i.isClosed() {
|
if m.isClosed() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if atomic.LoadInt32(&i.activeCallsCount) > 0 {
|
if atomic.LoadInt32(&m.activeCallsCount) > 0 {
|
||||||
i.resetIdleTimer(time.Duration(i.timeout))
|
m.resetIdleTimer(time.Duration(m.timeout))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// There has been activity on the channel since we last got here. Reset the
|
// There has been activity on the channel since we last got here. Reset the
|
||||||
// timer and return.
|
// timer and return.
|
||||||
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
|
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
|
||||||
// Set the timer to fire after a duration of idle timeout, calculated
|
// Set the timer to fire after a duration of idle timeout, calculated
|
||||||
// from the time the most recent RPC completed.
|
// from the time the most recent RPC completed.
|
||||||
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0)
|
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
|
||||||
i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano()))
|
m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// This CAS operation is extremely likely to succeed given that there has
|
// This CAS operation is extremely likely to succeed given that there has
|
||||||
// been no activity since the last time we were here. Setting the
|
// been no activity since the last time we were here. Setting the
|
||||||
// activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the
|
// activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
|
||||||
// channel is either in idle mode or is trying to get there.
|
// channel is either in idle mode or is trying to get there.
|
||||||
if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) {
|
if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
|
||||||
// This CAS operation can fail if an RPC started after we checked for
|
// This CAS operation can fail if an RPC started after we checked for
|
||||||
// activity at the top of this method, or one was ongoing from before
|
// activity at the top of this method, or one was ongoing from before
|
||||||
// the last time we were here. In both cases, reset the timer and return.
|
// the last time we were here. In both cases, reset the timer and return.
|
||||||
i.resetIdleTimer(time.Duration(i.timeout))
|
m.resetIdleTimer(time.Duration(m.timeout))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Now that we've set the active calls count to -math.MaxInt32, it's time to
|
// Now that we've set the active calls count to -math.MaxInt32, it's time to
|
||||||
// actually move to idle mode.
|
// actually move to idle mode.
|
||||||
if i.tryEnterIdleMode() {
|
if m.tryEnterIdleMode() {
|
||||||
// Successfully entered idle mode. No timer needed until we exit idle.
|
// Successfully entered idle mode. No timer needed until we exit idle.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() {
|
||||||
// Failed to enter idle mode due to a concurrent RPC that kept the channel
|
// Failed to enter idle mode due to a concurrent RPC that kept the channel
|
||||||
// active, or because of an error from the channel. Undo the attempt to
|
// active, or because of an error from the channel. Undo the attempt to
|
||||||
// enter idle, and reset the timer to try again later.
|
// enter idle, and reset the timer to try again later.
|
||||||
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
|
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
|
||||||
i.resetIdleTimer(time.Duration(i.timeout))
|
m.resetIdleTimer(time.Duration(m.timeout))
|
||||||
}
|
}
|
||||||
|
|
||||||
// tryEnterIdleMode instructs the channel to enter idle mode. But before
|
// tryEnterIdleMode instructs the channel to enter idle mode. But before
|
||||||
|
@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() {
|
||||||
// Return value indicates whether or not the channel moved to idle mode.
|
// Return value indicates whether or not the channel moved to idle mode.
|
||||||
//
|
//
|
||||||
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
|
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
|
||||||
func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
|
func (m *manager) tryEnterIdleMode() bool {
|
||||||
i.idleMu.Lock()
|
m.idleMu.Lock()
|
||||||
defer i.idleMu.Unlock()
|
defer m.idleMu.Unlock()
|
||||||
|
|
||||||
if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
|
if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
|
||||||
// We raced and lost to a new RPC. Very rare, but stop entering idle.
|
// We raced and lost to a new RPC. Very rare, but stop entering idle.
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
|
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
|
||||||
// A very short RPC could have come in (and also finished) after we
|
// A very short RPC could have come in (and also finished) after we
|
||||||
// checked for calls count and activity in handleIdleTimeout(), but
|
// checked for calls count and activity in handleIdleTimeout(), but
|
||||||
// before the CAS operation. So, we need to check for activity again.
|
// before the CAS operation. So, we need to check for activity again.
|
||||||
|
@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
|
||||||
// No new RPCs have come in since we last set the active calls count value
|
// No new RPCs have come in since we last set the active calls count value
|
||||||
// -math.MaxInt32 in the timer callback. And since we have the lock, it is
|
// -math.MaxInt32 in the timer callback. And since we have the lock, it is
|
||||||
// safe to enter idle mode now.
|
// safe to enter idle mode now.
|
||||||
if err := i.enforcer.enterIdleMode(); err != nil {
|
if err := m.enforcer.EnterIdleMode(); err != nil {
|
||||||
logger.Errorf("Failed to enter idle mode: %v", err)
|
m.logger.Errorf("Failed to enter idle mode: %v", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Successfully entered idle mode.
|
// Successfully entered idle mode.
|
||||||
i.actuallyIdle = true
|
m.actuallyIdle = true
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// onCallBegin is invoked at the start of every RPC.
|
// OnCallBegin is invoked at the start of every RPC.
|
||||||
func (i *idlenessManagerImpl) onCallBegin() error {
|
func (m *manager) OnCallBegin() error {
|
||||||
if i.isClosed() {
|
if m.isClosed() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
|
if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
|
||||||
// Channel is not idle now. Set the activity bit and allow the call.
|
// Channel is not idle now. Set the activity bit and allow the call.
|
||||||
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
|
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Channel is either in idle mode or is in the process of moving to idle
|
// Channel is either in idle mode or is in the process of moving to idle
|
||||||
// mode. Attempt to exit idle mode to allow this RPC.
|
// mode. Attempt to exit idle mode to allow this RPC.
|
||||||
if err := i.exitIdleMode(); err != nil {
|
if err := m.exitIdleMode(); err != nil {
|
||||||
// Undo the increment to calls count, and return an error causing the
|
// Undo the increment to calls count, and return an error causing the
|
||||||
// RPC to fail.
|
// RPC to fail.
|
||||||
atomic.AddInt32(&i.activeCallsCount, -1)
|
atomic.AddInt32(&m.activeCallsCount, -1)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
|
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// exitIdleMode instructs the channel to exit idle mode.
|
// exitIdleMode instructs the channel to exit idle mode.
|
||||||
//
|
//
|
||||||
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
|
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
|
||||||
func (i *idlenessManagerImpl) exitIdleMode() error {
|
func (m *manager) exitIdleMode() error {
|
||||||
i.idleMu.Lock()
|
m.idleMu.Lock()
|
||||||
defer i.idleMu.Unlock()
|
defer m.idleMu.Unlock()
|
||||||
|
|
||||||
if !i.actuallyIdle {
|
if !m.actuallyIdle {
|
||||||
// This can happen in two scenarios:
|
// This can happen in two scenarios:
|
||||||
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
|
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
|
||||||
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
|
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
|
||||||
// came in and onCallBegin() noticed that the calls count is negative.
|
// came in and OnCallBegin() noticed that the calls count is negative.
|
||||||
// - Channel is in idle mode, and multiple new RPCs come in at the same
|
// - Channel is in idle mode, and multiple new RPCs come in at the same
|
||||||
// time, all of them notice a negative calls count in onCallBegin and get
|
// time, all of them notice a negative calls count in OnCallBegin and get
|
||||||
// here. The first one to get the lock would get the channel to exit idle.
|
// here. The first one to get the lock would get the channel to exit idle.
|
||||||
//
|
//
|
||||||
// Either way, nothing to do here.
|
// Either way, nothing to do here.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := i.enforcer.exitIdleMode(); err != nil {
|
if err := m.enforcer.ExitIdleMode(); err != nil {
|
||||||
return fmt.Errorf("channel failed to exit idle mode: %v", err)
|
return fmt.Errorf("channel failed to exit idle mode: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Undo the idle entry process. This also respects any new RPC attempts.
|
// Undo the idle entry process. This also respects any new RPC attempts.
|
||||||
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
|
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
|
||||||
i.actuallyIdle = false
|
m.actuallyIdle = false
|
||||||
|
|
||||||
// Start a new timer to fire after the configured idle timeout.
|
// Start a new timer to fire after the configured idle timeout.
|
||||||
i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
|
m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// onCallEnd is invoked at the end of every RPC.
|
// OnCallEnd is invoked at the end of every RPC.
|
||||||
func (i *idlenessManagerImpl) onCallEnd() {
|
func (m *manager) OnCallEnd() {
|
||||||
if i.isClosed() {
|
if m.isClosed() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Record the time at which the most recent call finished.
|
// Record the time at which the most recent call finished.
|
||||||
atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())
|
atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
|
||||||
|
|
||||||
// Decrement the active calls count. This count can temporarily go negative
|
// Decrement the active calls count. This count can temporarily go negative
|
||||||
// when the timer callback is in the process of moving the channel to idle
|
// when the timer callback is in the process of moving the channel to idle
|
||||||
// mode, but one or more RPCs come in and complete before the timer callback
|
// mode, but one or more RPCs come in and complete before the timer callback
|
||||||
// can get done with the process of moving to idle mode.
|
// can get done with the process of moving to idle mode.
|
||||||
atomic.AddInt32(&i.activeCallsCount, -1)
|
atomic.AddInt32(&m.activeCallsCount, -1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *idlenessManagerImpl) isClosed() bool {
|
func (m *manager) isClosed() bool {
|
||||||
return atomic.LoadInt32(&i.closed) == 1
|
return atomic.LoadInt32(&m.closed) == 1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *idlenessManagerImpl) close() {
|
func (m *manager) Close() {
|
||||||
atomic.StoreInt32(&i.closed, 1)
|
atomic.StoreInt32(&m.closed, 1)
|
||||||
|
|
||||||
i.idleMu.Lock()
|
m.idleMu.Lock()
|
||||||
i.timer.Stop()
|
m.timer.Stop()
|
||||||
i.timer = nil
|
m.timer = nil
|
||||||
i.idleMu.Unlock()
|
m.idleMu.Unlock()
|
||||||
}
|
}
|
|
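Editor's note: a minimal sketch of how the new idle.Manager API introduced above might be wired up, assuming code living inside the gRPC module (the package is internal and cannot be imported from outside it). fakeEnforcer, the 30-minute timeout, and the grpclog component name are hypothetical illustrations, not part of the diff.

package grpc

import (
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/idle"
)

// fakeEnforcer is a hypothetical stand-in for the ClientConn-side callbacks.
type fakeEnforcer struct{}

func (fakeEnforcer) EnterIdleMode() error { return nil }
func (fakeEnforcer) ExitIdleMode() error  { return nil }

func exampleIdleManager() {
	m := idle.NewManager(idle.ManagerOptions{
		Enforcer: fakeEnforcer{},
		Timeout:  30 * time.Minute,
		Logger:   grpclog.Component("idle-example"),
	})
	defer m.Close()

	// Bracket each RPC so the manager can track activity.
	if err := m.OnCallBegin(); err != nil {
		return // channel failed to exit idle mode
	}
	// ... issue the RPC ...
	m.OnCallEnd()
}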
@ -30,7 +30,7 @@ import (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// WithHealthCheckFunc is set by dialoptions.go
|
// WithHealthCheckFunc is set by dialoptions.go
|
||||||
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
|
WithHealthCheckFunc any // func (HealthChecker) DialOption
|
||||||
// HealthCheckFunc is used to provide client-side LB channel health checking
|
// HealthCheckFunc is used to provide client-side LB channel health checking
|
||||||
HealthCheckFunc HealthChecker
|
HealthCheckFunc HealthChecker
|
||||||
// BalancerUnregister is exported by package balancer to unregister a balancer.
|
// BalancerUnregister is exported by package balancer to unregister a balancer.
|
||||||
|
@ -38,8 +38,12 @@ var (
|
||||||
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
|
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
|
||||||
// default, but tests may wish to set it lower for convenience.
|
// default, but tests may wish to set it lower for convenience.
|
||||||
KeepaliveMinPingTime = 10 * time.Second
|
KeepaliveMinPingTime = 10 * time.Second
|
||||||
|
// KeepaliveMinServerPingTime is the minimum ping interval for servers.
|
||||||
|
// This must be 1s by default, but tests may wish to set it lower for
|
||||||
|
// convenience.
|
||||||
|
KeepaliveMinServerPingTime = time.Second
|
||||||
// ParseServiceConfig parses a JSON representation of the service config.
|
// ParseServiceConfig parses a JSON representation of the service config.
|
||||||
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
|
ParseServiceConfig any // func(string) *serviceconfig.ParseResult
|
||||||
// EqualServiceConfigForTesting is for testing service config generation and
|
// EqualServiceConfigForTesting is for testing service config generation and
|
||||||
// parsing. Both a and b should be returned by ParseServiceConfig.
|
// parsing. Both a and b should be returned by ParseServiceConfig.
|
||||||
// This function compares the config without rawJSON stripped, in case the
|
// This function compares the config without rawJSON stripped, in case the
|
||||||
|
@ -49,33 +53,33 @@ var (
|
||||||
// given name. This is set by package certprovider for use from xDS
|
// given name. This is set by package certprovider for use from xDS
|
||||||
// bootstrap code while parsing certificate provider configs in the
|
// bootstrap code while parsing certificate provider configs in the
|
||||||
// bootstrap file.
|
// bootstrap file.
|
||||||
GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
|
GetCertificateProviderBuilder any // func(string) certprovider.Builder
|
||||||
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
|
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
|
||||||
// stored in the passed in attributes. This is set by
|
// stored in the passed in attributes. This is set by
|
||||||
// credentials/xds/xds.go.
|
// credentials/xds/xds.go.
|
||||||
GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
|
GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
|
||||||
// GetServerCredentials returns the transport credentials configured on a
|
// GetServerCredentials returns the transport credentials configured on a
|
||||||
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
||||||
// is configured on the underlying gRPC server. This is set by server.go.
|
// is configured on the underlying gRPC server. This is set by server.go.
|
||||||
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
|
GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
|
||||||
// CanonicalString returns the canonical string of the code defined here:
|
// CanonicalString returns the canonical string of the code defined here:
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
|
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
|
||||||
//
|
//
|
||||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||||
// deleted or changed.
|
// deleted or changed.
|
||||||
CanonicalString interface{} // func (codes.Code) string
|
CanonicalString any // func (codes.Code) string
|
||||||
// DrainServerTransports initiates a graceful close of existing connections
|
// DrainServerTransports initiates a graceful close of existing connections
|
||||||
// on a gRPC server accepted on the provided listener address. An
|
// on a gRPC server accepted on the provided listener address. An
|
||||||
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
||||||
// listener moves to "not-serving" mode.
|
// listener moves to "not-serving" mode.
|
||||||
DrainServerTransports interface{} // func(*grpc.Server, string)
|
DrainServerTransports any // func(*grpc.Server, string)
|
||||||
// AddGlobalServerOptions adds an array of ServerOption that will be
|
// AddGlobalServerOptions adds an array of ServerOption that will be
|
||||||
// effective globally for newly created servers. The priority will be: 1.
|
// effective globally for newly created servers. The priority will be: 1.
|
||||||
// user-provided; 2. this method; 3. default values.
|
// user-provided; 2. this method; 3. default values.
|
||||||
//
|
//
|
||||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||||
// deleted or changed.
|
// deleted or changed.
|
||||||
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
|
AddGlobalServerOptions any // func(opt ...ServerOption)
|
||||||
// ClearGlobalServerOptions clears the array of extra ServerOption. This
|
// ClearGlobalServerOptions clears the array of extra ServerOption. This
|
||||||
// method is useful in testing and benchmarking.
|
// method is useful in testing and benchmarking.
|
||||||
//
|
//
|
||||||
|
@ -88,14 +92,14 @@ var (
|
||||||
//
|
//
|
||||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||||
// deleted or changed.
|
// deleted or changed.
|
||||||
AddGlobalDialOptions interface{} // func(opt ...DialOption)
|
AddGlobalDialOptions any // func(opt ...DialOption)
|
||||||
// DisableGlobalDialOptions returns a DialOption that prevents the
|
// DisableGlobalDialOptions returns a DialOption that prevents the
|
||||||
// ClientConn from applying the global DialOptions (set via
|
// ClientConn from applying the global DialOptions (set via
|
||||||
// AddGlobalDialOptions).
|
// AddGlobalDialOptions).
|
||||||
//
|
//
|
||||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||||
// deleted or changed.
|
// deleted or changed.
|
||||||
DisableGlobalDialOptions interface{} // func() grpc.DialOption
|
DisableGlobalDialOptions any // func() grpc.DialOption
|
||||||
// ClearGlobalDialOptions clears the array of extra DialOption. This
|
// ClearGlobalDialOptions clears the array of extra DialOption. This
|
||||||
// method is useful in testing and benchmarking.
|
// method is useful in testing and benchmarking.
|
||||||
//
|
//
|
||||||
|
@ -104,23 +108,26 @@ var (
|
||||||
ClearGlobalDialOptions func()
|
ClearGlobalDialOptions func()
|
||||||
// JoinDialOptions combines the dial options passed as arguments into a
|
// JoinDialOptions combines the dial options passed as arguments into a
|
||||||
// single dial option.
|
// single dial option.
|
||||||
JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
|
JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
|
||||||
// JoinServerOptions combines the server options passed as arguments into a
|
// JoinServerOptions combines the server options passed as arguments into a
|
||||||
// single server option.
|
// single server option.
|
||||||
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
|
JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption
|
||||||
|
|
||||||
// WithBinaryLogger returns a DialOption that specifies the binary logger
|
// WithBinaryLogger returns a DialOption that specifies the binary logger
|
||||||
// for a ClientConn.
|
// for a ClientConn.
|
||||||
//
|
//
|
||||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||||
// deleted or changed.
|
// deleted or changed.
|
||||||
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
|
WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption
|
||||||
// BinaryLogger returns a ServerOption that can set the binary logger for a
|
// BinaryLogger returns a ServerOption that can set the binary logger for a
|
||||||
// server.
|
// server.
|
||||||
//
|
//
|
||||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||||
// deleted or changed.
|
// deleted or changed.
|
||||||
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
|
BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
|
||||||
|
|
||||||
|
// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
|
||||||
|
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
|
||||||
|
|
||||||
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
||||||
// the provided xds bootstrap config instead of the global configuration from
|
// the provided xds bootstrap config instead of the global configuration from
|
||||||
|
@ -131,7 +138,7 @@ var (
|
||||||
//
|
//
|
||||||
// This function should ONLY be used for testing and may not work with some
|
// This function should ONLY be used for testing and may not work with some
|
||||||
// other features, including the CSDS service.
|
// other features, including the CSDS service.
|
||||||
NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
|
NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
|
||||||
|
|
||||||
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
||||||
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
||||||
|
@ -163,7 +170,11 @@ var (
|
||||||
UnregisterRBACHTTPFilterForTesting func()
|
UnregisterRBACHTTPFilterForTesting func()
|
||||||
|
|
||||||
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
|
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
|
||||||
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
|
ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
|
||||||
|
|
||||||
|
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
|
||||||
|
// metadata to RPCs.
|
||||||
|
GRPCResolverSchemeExtraMetadata string = "xds"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||||
|
@ -174,7 +185,7 @@ var (
|
||||||
//
|
//
|
||||||
// The health checking protocol is defined at:
|
// The health checking protocol is defined at:
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
|
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
|
||||||
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
|
type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
|
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
|
||||||
|
|
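Editor's note: the interface{} -> any substitutions running through the hunks above are purely cosmetic; any has been a predeclared alias for interface{} since Go 1.18, so the two spellings denote the same type and no behavior changes. A two-line illustration:

package main

func main() {
	var x interface{} = "hello" // old spelling
	var y any = x               // new spelling; identical type, no conversion needed
	_ = y
}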
|
@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata")
|
||||||
|
|
||||||
type mdValue metadata.MD
|
type mdValue metadata.MD
|
||||||
|
|
||||||
func (m mdValue) Equal(o interface{}) bool {
|
func (m mdValue) Equal(o any) bool {
|
||||||
om, ok := o.(mdValue)
|
om, ok := o.(mdValue)
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
|
|
|
@ -35,7 +35,7 @@ const jsonIndent = " "
|
||||||
// ToJSON marshals the input into a json string.
|
// ToJSON marshals the input into a json string.
|
||||||
//
|
//
|
||||||
// If marshal fails, it falls back to fmt.Sprintf("%+v").
|
// If marshal fails, it falls back to fmt.Sprintf("%+v").
|
||||||
func ToJSON(e interface{}) string {
|
func ToJSON(e any) string {
|
||||||
switch ee := e.(type) {
|
switch ee := e.(type) {
|
||||||
case protov1.Message:
|
case protov1.Message:
|
||||||
mm := jsonpb.Marshaler{Indent: jsonIndent}
|
mm := jsonpb.Marshaler{Indent: jsonIndent}
|
||||||
|
|
|
@ -92,7 +92,7 @@ type ClientStream interface {
|
||||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||||
// to call SendMsg on the same stream in different goroutines. It is also
|
// to call SendMsg on the same stream in different goroutines. It is also
|
||||||
// not safe to call CloseSend concurrently with SendMsg.
|
// not safe to call CloseSend concurrently with SendMsg.
|
||||||
SendMsg(m interface{}) error
|
SendMsg(m any) error
|
||||||
// RecvMsg blocks until it receives a message into m or the stream is
|
// RecvMsg blocks until it receives a message into m or the stream is
|
||||||
// done. It returns io.EOF when the stream completes successfully. On
|
// done. It returns io.EOF when the stream completes successfully. On
|
||||||
// any other error, the stream is aborted and the error contains the RPC
|
// any other error, the stream is aborted and the error contains the RPC
|
||||||
|
@ -101,7 +101,7 @@ type ClientStream interface {
|
||||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||||
// calling RecvMsg on the same stream at the same time, but it is not
|
// calling RecvMsg on the same stream at the same time, but it is not
|
||||||
// safe to call RecvMsg on the same stream in different goroutines.
|
// safe to call RecvMsg on the same stream in different goroutines.
|
||||||
RecvMsg(m interface{}) error
|
RecvMsg(m any) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClientInterceptor is an interceptor for gRPC client streams.
|
// ClientInterceptor is an interceptor for gRPC client streams.
|
||||||
|
|
|
@ -62,7 +62,8 @@ const (
|
||||||
defaultPort = "443"
|
defaultPort = "443"
|
||||||
defaultDNSSvrPort = "53"
|
defaultDNSSvrPort = "53"
|
||||||
golang = "GO"
|
golang = "GO"
|
||||||
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
|
// txtPrefix is the prefix string to be prepended to the host name for txt
|
||||||
|
// record lookup.
|
||||||
txtPrefix = "_grpc_config."
|
txtPrefix = "_grpc_config."
|
||||||
// In DNS, service config is encoded in a TXT record via the mechanism
|
// In DNS, service config is encoded in a TXT record via the mechanism
|
||||||
// described in RFC-1464 using the attribute name grpc_config.
|
// described in RFC-1464 using the attribute name grpc_config.
|
||||||
|
@ -86,14 +87,14 @@ var (
|
||||||
minDNSResRate = 30 * time.Second
|
minDNSResRate = 30 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
|
var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
|
||||||
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
return func(ctx context.Context, network, _ string) (net.Conn, error) {
|
||||||
var dialer net.Dialer
|
var dialer net.Dialer
|
||||||
return dialer.DialContext(ctx, network, authority)
|
return dialer.DialContext(ctx, network, address)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var customAuthorityResolver = func(authority string) (netResolver, error) {
|
var newNetResolver = func(authority string) (netResolver, error) {
|
||||||
host, port, err := parseTarget(authority, defaultDNSSvrPort)
|
host, port, err := parseTarget(authority, defaultDNSSvrPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
|
||||||
|
|
||||||
return &net.Resolver{
|
return &net.Resolver{
|
||||||
PreferGo: true,
|
PreferGo: true,
|
||||||
Dial: customAuthorityDialler(authorityWithPort),
|
Dial: addressDialer(authorityWithPort),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder {
|
||||||
|
|
||||||
type dnsBuilder struct{}
|
type dnsBuilder struct{}
|
||||||
|
|
||||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
// Build creates and starts a DNS resolver that watches the name resolution of
|
||||||
|
// the target.
|
||||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||||
host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||||
if target.URL.Host == "" {
|
if target.URL.Host == "" {
|
||||||
d.resolver = defaultResolver
|
d.resolver = defaultResolver
|
||||||
} else {
|
} else {
|
||||||
d.resolver, err = customAuthorityResolver(target.URL.Host)
|
d.resolver, err = newNetResolver(target.URL.Host)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -180,19 +182,22 @@ type dnsResolver struct {
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
cc resolver.ClientConn
|
cc resolver.ClientConn
|
||||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
// rn channel is used by ResolveNow() to force an immediate resolution of the
|
||||||
|
// target.
|
||||||
rn chan struct{}
|
rn chan struct{}
|
||||||
// wg is used to enforce Close() to return after the watcher() goroutine has finished.
|
// wg is used to enforce Close() to return after the watcher() goroutine has
|
||||||
// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
|
// finished. Otherwise, data race will be possible. [Race Example] in
|
||||||
// replace the real lookup functions with mocked ones to facilitate testing.
|
// dns_resolver_test we replace the real lookup functions with mocked ones to
|
||||||
// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
|
// facilitate testing. If Close() doesn't wait for watcher() goroutine
|
||||||
// will warn lookup (READ the lookup function pointers) inside watcher() goroutine
|
// finishes, race detector sometimes will warn lookup (READ the lookup
|
||||||
// has data race with replaceNetFunc (WRITE the lookup function pointers).
|
// function pointers) inside watcher() goroutine has data race with
|
||||||
|
// replaceNetFunc (WRITE the lookup function pointers).
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
disableServiceConfig bool
|
disableServiceConfig bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
|
// ResolveNow invokes an immediate resolution of the target that this
|
||||||
|
// dnsResolver watches.
|
||||||
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
|
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
|
||||||
select {
|
select {
|
||||||
case d.rn <- struct{}{}:
|
case d.rn <- struct{}{}:
|
||||||
|
@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() {
|
||||||
|
|
||||||
var timer *time.Timer
|
var timer *time.Timer
|
||||||
if err == nil {
|
if err == nil {
|
||||||
// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
|
// Success resolving, wait for the next ResolveNow. However, also wait 30
|
||||||
// to prevent constantly re-resolving.
|
// seconds at the very least to prevent constantly re-resolving.
|
||||||
backoffIndex = 1
|
backoffIndex = 1
|
||||||
timer = newTimerDNSResRate(minDNSResRate)
|
timer = newTimerDNSResRate(minDNSResRate)
|
||||||
select {
|
select {
|
||||||
|
@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() {
|
||||||
case <-d.rn:
|
case <-d.rn:
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Poll on an error found in DNS Resolver or an error received from ClientConn.
|
// Poll on an error found in DNS Resolver or an error received from
|
||||||
|
// ClientConn.
|
||||||
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
|
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
|
||||||
backoffIndex++
|
backoffIndex++
|
||||||
}
|
}
|
||||||
|
@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleDNSError(err error, lookupType string) error {
|
func handleDNSError(err error, lookupType string) error {
|
||||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
dnsErr, ok := err.(*net.DNSError)
|
||||||
|
if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||||
// Timeouts and temporary errors should be communicated to gRPC to
|
// Timeouts and temporary errors should be communicated to gRPC to
|
||||||
// attempt another DNS query (with backoff). Other errors should be
|
// attempt another DNS query (with backoff). Other errors should be
|
||||||
// suppressed (they may represent the absence of a TXT record).
|
// suppressed (they may represent the absence of a TXT record).
|
||||||
|
@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||||
res += s
|
res += s
|
||||||
}
|
}
|
||||||
|
|
||||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
// TXT record must have "grpc_config=" attribute in order to be used as
|
||||||
|
// service config.
|
||||||
if !strings.HasPrefix(res, txtAttribute) {
|
if !strings.HasPrefix(res, txtAttribute) {
|
||||||
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||||
// This is not an error; it is the equivalent of not having a service config.
|
// This is not an error; it is the equivalent of not having a service
|
||||||
|
// config.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
|
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
|
||||||
|
@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||||
return &state, nil
|
return &state, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
// formatIP returns ok = false if addr is not a valid textual representation of
|
||||||
// If addr is an IPv4 address, return the addr and ok = true.
|
// an IP address. If addr is an IPv4 address, return the addr and ok = true.
|
||||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
|
// If addr is an IPv6 address, return the addr enclosed in square brackets and
|
||||||
|
// ok = true.
|
||||||
func formatIP(addr string) (addrIP string, ok bool) {
|
func formatIP(addr string) (addrIP string, ok bool) {
|
||||||
ip := net.ParseIP(addr)
|
ip := net.ParseIP(addr)
|
||||||
if ip == nil {
|
if ip == nil {
|
||||||
|
@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) {
|
||||||
return "[" + addr + "]", true
|
return "[" + addr + "]", true
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseTarget takes the user input target string and default port, returns formatted host and port info.
|
// parseTarget takes the user input target string and default port, returns
|
||||||
// If target doesn't specify a port, set the port to be the defaultPort.
|
// formatted host and port info. If target doesn't specify a port, set the port
|
||||||
// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
|
// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
|
||||||
// are stripped when setting the host.
|
// in square brackets, brackets are stripped when setting the host.
|
||||||
// examples:
|
// examples:
|
||||||
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
|
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
|
||||||
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
|
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
|
||||||
|
@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
|
||||||
}
|
}
|
||||||
if host, port, err = net.SplitHostPort(target); err == nil {
|
if host, port, err = net.SplitHostPort(target); err == nil {
|
||||||
if port == "" {
|
if port == "" {
|
||||||
// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
|
// If the port field is empty (target ends with colon), e.g. "[::1]:",
|
||||||
|
// this is an error.
|
||||||
return "", "", errEndsWithColon
|
return "", "", errEndsWithColon
|
||||||
}
|
}
|
||||||
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||||
if host == "" {
|
if host == "" {
|
||||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
// Keep consistent with net.Dial(): If the host is empty, as in ":80",
|
||||||
|
// the local system is assumed.
|
||||||
host = "localhost"
|
host = "localhost"
|
||||||
}
|
}
|
||||||
return host, port, nil
|
return host, port, nil
|
||||||
|
|
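Editor's note: a hedged client-side sketch of the "dns" targets served by the resolver changes above, using only public grpc API (grpc.Dial and insecure credentials). example.com, the port, and 8.8.8.8 are placeholder values.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Default system resolver.
	cc, err := grpc.Dial("dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	// With an explicit authority, the dialer above queries that DNS server instead.
	cc2, err := grpc.Dial("dns://8.8.8.8:53/example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc2.Close()
}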
|
@@ -49,7 +49,7 @@ func New(c codes.Code, msg string) *Status {
 }
 
 // Newf returns New(c, fmt.Sprintf(format, a...)).
-func Newf(c codes.Code, format string, a ...interface{}) *Status {
+func Newf(c codes.Code, format string, a ...any) *Status {
 	return New(c, fmt.Sprintf(format, a...))
 }
 
@@ -64,7 +64,7 @@ func Err(c codes.Code, msg string) error {
 }
 
 // Errorf returns Error(c, fmt.Sprintf(format, a...)).
-func Errorf(c codes.Code, format string, a ...interface{}) error {
+func Errorf(c codes.Code, format string, a ...any) error {
 	return Err(c, fmt.Sprintf(format, a...))
 }
 
@@ -120,11 +120,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
 
 // Details returns a slice of details messages attached to the status.
 // If a detail cannot be decoded, the error is returned in place of the detail.
-func (s *Status) Details() []interface{} {
+func (s *Status) Details() []any {
 	if s == nil || s.s == nil {
 		return nil
 	}
-	details := make([]interface{}, 0, len(s.s.Details))
+	details := make([]any, 0, len(s.s.Details))
 	for _, any := range s.s.Details {
 		detail := &ptypes.DynamicAny{}
 		if err := ptypes.UnmarshalAny(any, detail); err != nil {
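Editor's note: the status hunks above only swap interface{} for any. For orientation, a small sketch of the public status/codes API that these internal helpers back; the NotFound code and the messages are illustrative only.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Build a gRPC error carrying a status code and formatted message.
	err := status.Errorf(codes.NotFound, "user %q not found", "alice")

	// Callers typically recover the *Status with status.FromError.
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message())
	}
}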
||||||
|
|
|
@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
|
||||||
}
|
}
|
||||||
|
|
||||||
type itemNode struct {
|
type itemNode struct {
|
||||||
it interface{}
|
it any
|
||||||
next *itemNode
|
next *itemNode
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -49,7 +49,7 @@ type itemList struct {
|
||||||
tail *itemNode
|
tail *itemNode
|
||||||
}
|
}
|
||||||
|
|
||||||
func (il *itemList) enqueue(i interface{}) {
|
func (il *itemList) enqueue(i any) {
|
||||||
n := &itemNode{it: i}
|
n := &itemNode{it: i}
|
||||||
if il.tail == nil {
|
if il.tail == nil {
|
||||||
il.head, il.tail = n, n
|
il.head, il.tail = n, n
|
||||||
|
@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) {
|
||||||
|
|
||||||
// peek returns the first item in the list without removing it from the
|
// peek returns the first item in the list without removing it from the
|
||||||
// list.
|
// list.
|
||||||
func (il *itemList) peek() interface{} {
|
func (il *itemList) peek() any {
|
||||||
return il.head.it
|
return il.head.it
|
||||||
}
|
}
|
||||||
|
|
||||||
func (il *itemList) dequeue() interface{} {
|
func (il *itemList) dequeue() any {
|
||||||
if il.head == nil {
|
if il.head == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
|
func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
|
||||||
var wakeUp bool
|
var wakeUp bool
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
if c.err != nil {
|
if c.err != nil {
|
||||||
|
@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note argument f should never be nil.
|
// Note argument f should never be nil.
|
||||||
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
|
func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
if c.err != nil {
|
if c.err != nil {
|
||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
|
@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
func (c *controlBuffer) get(block bool) (any, error) {
|
||||||
for {
|
for {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
if c.err != nil {
|
if c.err != nil {
|
||||||
|
@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *loopyWriter) handle(i interface{}) error {
|
func (l *loopyWriter) handle(i any) error {
|
||||||
switch i := i.(type) {
|
switch i := i.(type) {
|
||||||
case *incomingWindowUpdate:
|
case *incomingWindowUpdate:
|
||||||
l.incomingWindowUpdateHandler(i)
|
l.incomingWindowUpdateHandler(i)
|
||||||
|
|
|
@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||||
readerDone: make(chan struct{}),
|
readerDone: make(chan struct{}),
|
||||||
writerDone: make(chan struct{}),
|
writerDone: make(chan struct{}),
|
||||||
goAway: make(chan struct{}),
|
goAway: make(chan struct{}),
|
||||||
framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
|
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
|
||||||
fc: &trInFlow{limit: uint32(icwz)},
|
fc: &trInFlow{limit: uint32(icwz)},
|
||||||
scheme: scheme,
|
scheme: scheme,
|
||||||
activeStreams: make(map[uint32]*Stream),
|
activeStreams: make(map[uint32]*Stream),
|
||||||
|
@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||||
firstTry := true
|
firstTry := true
|
||||||
var ch chan struct{}
|
var ch chan struct{}
|
||||||
transportDrainRequired := false
|
transportDrainRequired := false
|
||||||
checkForStreamQuota := func(it interface{}) bool {
|
checkForStreamQuota := func(it any) bool {
|
||||||
if t.streamQuota <= 0 { // Can go negative if server decreases it.
|
if t.streamQuota <= 0 { // Can go negative if server decreases it.
|
||||||
if firstTry {
|
if firstTry {
|
||||||
t.waitingStreams++
|
t.waitingStreams++
|
||||||
|
@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
var hdrListSizeErr error
|
var hdrListSizeErr error
|
||||||
checkForHeaderListSize := func(it interface{}) bool {
|
checkForHeaderListSize := func(it any) bool {
|
||||||
if t.maxSendHeaderListSize == nil {
|
if t.maxSendHeaderListSize == nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
|
success, err := t.controlBuf.executeAndPut(func(it any) bool {
|
||||||
return checkForHeaderListSize(it) && checkForStreamQuota(it)
|
return checkForHeaderListSize(it) && checkForStreamQuota(it)
|
||||||
}, hdr)
|
}, hdr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||||
rst: rst,
|
rst: rst,
|
||||||
rstCode: rstCode,
|
rstCode: rstCode,
|
||||||
}
|
}
|
||||||
addBackStreamQuota := func(interface{}) bool {
|
addBackStreamQuota := func(any) bool {
|
||||||
t.streamQuota++
|
t.streamQuota++
|
||||||
if t.streamQuota > 0 && t.waitingStreams > 0 {
|
if t.streamQuota > 0 && t.waitingStreams > 0 {
|
||||||
select {
|
select {
|
||||||
|
@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
|
||||||
// for the transport and the stream based on the current bdp
|
// for the transport and the stream based on the current bdp
|
||||||
// estimation.
|
// estimation.
|
||||||
func (t *http2Client) updateFlowControl(n uint32) {
|
func (t *http2Client) updateFlowControl(n uint32) {
|
||||||
updateIWS := func(interface{}) bool {
|
updateIWS := func(any) bool {
|
||||||
t.initialWindowSize = int32(n)
|
t.initialWindowSize = int32(n)
|
||||||
t.mu.Lock()
|
t.mu.Lock()
|
||||||
for _, s := range t.activeStreams {
|
for _, s := range t.activeStreams {
|
||||||
|
@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
|
||||||
}
|
}
|
||||||
updateFuncs = append(updateFuncs, updateStreamQuota)
|
updateFuncs = append(updateFuncs, updateStreamQuota)
|
||||||
}
|
}
|
||||||
t.controlBuf.executeAndPut(func(interface{}) bool {
|
t.controlBuf.executeAndPut(func(any) bool {
|
||||||
for _, f := range updateFuncs {
|
for _, f := range updateFuncs {
|
||||||
f()
|
f()
|
||||||
}
|
}
|
||||||
|
@ -1505,14 +1505,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
isHeader := false
|
// For headers, set them in s.header and close headerChan. For trailers or
|
||||||
|
// trailers-only, closeStream will set the trailers and close headerChan as
|
||||||
// If headerChan hasn't been closed yet
|
// needed.
|
||||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
if !endStream {
|
||||||
s.headerValid = true
|
// If headerChan hasn't been closed yet (expected, given we checked it
|
||||||
if !endStream {
|
// above, but something else could have potentially closed the whole
|
||||||
// HEADERS frame block carries a Response-Headers.
|
// stream).
|
||||||
isHeader = true
|
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||||
|
s.headerValid = true
|
||||||
// These values can be set without any synchronization because
|
// These values can be set without any synchronization because
|
||||||
// stream goroutine will read it only after seeing a closed
|
// stream goroutine will read it only after seeing a closed
|
||||||
// headerChan which we'll close after setting this.
|
// headerChan which we'll close after setting this.
|
||||||
|
@ -1520,15 +1521,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||||
if len(mdata) > 0 {
|
if len(mdata) > 0 {
|
||||||
s.header = mdata
|
s.header = mdata
|
||||||
}
|
}
|
||||||
} else {
|
close(s.headerChan)
|
||||||
// HEADERS frame block carries a Trailers-Only.
|
|
||||||
s.noHeaders = true
|
|
||||||
}
|
}
|
||||||
close(s.headerChan)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sh := range t.statsHandlers {
|
for _, sh := range t.statsHandlers {
|
||||||
if isHeader {
|
if !endStream {
|
||||||
inHeader := &stats.InHeader{
|
inHeader := &stats.InHeader{
|
||||||
Client: true,
|
Client: true,
|
||||||
WireLength: int(frame.Header().Length),
|
WireLength: int(frame.Header().Length),
|
||||||
|
@@ -1554,9 +1552,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||||
statusGen = status.New(rawStatusCode, grpcMessage)
|
statusGen = status.New(rawStatusCode, grpcMessage)
|
||||||
}
|
}
|
||||||
|
|
||||||
// if client received END_STREAM from server while stream was still active, send RST_STREAM
|
// If client received END_STREAM from server while stream was still active,
|
||||||
rst := s.getState() == streamActive
|
// send RST_STREAM.
|
||||||
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
|
rstStream := s.getState() == streamActive
|
||||||
|
t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// readServerPreface reads and handles the initial settings frame from the
|
// readServerPreface reads and handles the initial settings frame from the
|
||||||
|
|
|
@@ -165,7 +165,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||||
if config.MaxHeaderListSize != nil {
|
if config.MaxHeaderListSize != nil {
|
||||||
maxHeaderListSize = *config.MaxHeaderListSize
|
maxHeaderListSize = *config.MaxHeaderListSize
|
||||||
}
|
}
|
||||||
framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
|
framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
|
||||||
// Send initial settings as connection preface to client.
|
// Send initial settings as connection preface to client.
|
||||||
isettings := []http2.Setting{{
|
isettings := []http2.Setting{{
|
||||||
ID: http2.SettingMaxFrameSize,
|
ID: http2.SettingMaxFrameSize,
|
||||||
|
@@ -233,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||||
kp.Timeout = defaultServerKeepaliveTimeout
|
kp.Timeout = defaultServerKeepaliveTimeout
|
||||||
}
|
}
|
||||||
if kp.Time != infinity {
|
if kp.Time != infinity {
|
||||||
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
|
if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
|
||||||
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
|
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -850,7 +850,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
t.controlBuf.executeAndPut(func(interface{}) bool {
|
t.controlBuf.executeAndPut(func(any) bool {
|
||||||
for _, f := range updateFuncs {
|
for _, f := range updateFuncs {
|
||||||
f()
|
f()
|
||||||
}
|
}
|
||||||
|
@@ -934,7 +934,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD)
|
||||||
return headerFields
|
return headerFields
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
func (t *http2Server) checkForHeaderListSize(it any) bool {
|
||||||
if t.maxSendHeaderListSize == nil {
|
if t.maxSendHeaderListSize == nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
|
@@ -30,6 +30,7 @@ import (
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
|
@@ -309,6 +310,7 @@ func decodeGrpcMessageUnchecked(msg string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type bufWriter struct {
|
type bufWriter struct {
|
||||||
|
pool *sync.Pool
|
||||||
buf []byte
|
buf []byte
|
||||||
offset int
|
offset int
|
||||||
batchSize int
|
batchSize int
|
||||||
|
@@ -316,12 +318,17 @@ type bufWriter struct {
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
|
func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
|
||||||
return &bufWriter{
|
w := &bufWriter{
|
||||||
buf: make([]byte, batchSize*2),
|
|
||||||
batchSize: batchSize,
|
batchSize: batchSize,
|
||||||
conn: conn,
|
conn: conn,
|
||||||
|
pool: pool,
|
||||||
}
|
}
|
||||||
|
// this indicates that we should use non shared buf
|
||||||
|
if pool == nil {
|
||||||
|
w.buf = make([]byte, batchSize)
|
||||||
|
}
|
||||||
|
return w
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||||
|
@@ -332,19 +339,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||||
n, err = w.conn.Write(b)
|
n, err = w.conn.Write(b)
|
||||||
return n, toIOError(err)
|
return n, toIOError(err)
|
||||||
}
|
}
|
||||||
|
if w.buf == nil {
|
||||||
|
b := w.pool.Get().(*[]byte)
|
||||||
|
w.buf = *b
|
||||||
|
}
|
||||||
for len(b) > 0 {
|
for len(b) > 0 {
|
||||||
nn := copy(w.buf[w.offset:], b)
|
nn := copy(w.buf[w.offset:], b)
|
||||||
b = b[nn:]
|
b = b[nn:]
|
||||||
w.offset += nn
|
w.offset += nn
|
||||||
n += nn
|
n += nn
|
||||||
if w.offset >= w.batchSize {
|
if w.offset >= w.batchSize {
|
||||||
err = w.Flush()
|
err = w.flushKeepBuffer()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *bufWriter) Flush() error {
|
func (w *bufWriter) Flush() error {
|
||||||
|
err := w.flushKeepBuffer()
|
||||||
|
// Only release the buffer if we are in a "shared" mode
|
||||||
|
if w.buf != nil && w.pool != nil {
|
||||||
|
b := w.buf
|
||||||
|
w.pool.Put(&b)
|
||||||
|
w.buf = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *bufWriter) flushKeepBuffer() error {
|
||||||
if w.err != nil {
|
if w.err != nil {
|
||||||
return w.err
|
return w.err
|
||||||
}
|
}
|
||||||
|
@@ -381,7 +403,10 @@ type framer struct {
|
||||||
fr *http2.Framer
|
fr *http2.Framer
|
||||||
}
|
}
|
||||||
|
|
||||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
|
var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
|
||||||
|
var writeBufferMutex sync.Mutex
|
||||||
|
|
||||||
|
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
|
||||||
if writeBufferSize < 0 {
|
if writeBufferSize < 0 {
|
||||||
writeBufferSize = 0
|
writeBufferSize = 0
|
||||||
}
|
}
|
||||||
|
@@ -389,7 +414,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
|
||||||
if readBufferSize > 0 {
|
if readBufferSize > 0 {
|
||||||
r = bufio.NewReaderSize(r, readBufferSize)
|
r = bufio.NewReaderSize(r, readBufferSize)
|
||||||
}
|
}
|
||||||
w := newBufWriter(conn, writeBufferSize)
|
var pool *sync.Pool
|
||||||
|
if sharedWriteBuffer {
|
||||||
|
pool = getWriteBufferPool(writeBufferSize)
|
||||||
|
}
|
||||||
|
w := newBufWriter(conn, writeBufferSize, pool)
|
||||||
f := &framer{
|
f := &framer{
|
||||||
writer: w,
|
writer: w,
|
||||||
fr: http2.NewFramer(w, r),
|
fr: http2.NewFramer(w, r),
|
||||||
|
@@ -403,6 +432,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
|
||||||
return f
|
return f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getWriteBufferPool(writeBufferSize int) *sync.Pool {
|
||||||
|
writeBufferMutex.Lock()
|
||||||
|
defer writeBufferMutex.Unlock()
|
||||||
|
size := writeBufferSize * 2
|
||||||
|
pool, ok := writeBufferPoolMap[size]
|
||||||
|
if ok {
|
||||||
|
return pool
|
||||||
|
}
|
||||||
|
pool = &sync.Pool{
|
||||||
|
New: func() any {
|
||||||
|
b := make([]byte, size)
|
||||||
|
return &b
|
||||||
|
},
|
||||||
|
}
|
||||||
|
writeBufferPoolMap[size] = pool
|
||||||
|
return pool
|
||||||
|
}
|
||||||
|
|
||||||
// parseDialTarget returns the network and address to pass to dialer.
|
// parseDialTarget returns the network and address to pass to dialer.
|
||||||
func parseDialTarget(target string) (string, string) {
|
func parseDialTarget(target string) (string, string) {
|
||||||
net := "tcp"
|
net := "tcp"
|
||||||
|
|
|
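The getWriteBufferPool hunk above gives connections that opt into shared write buffers a process-wide sync.Pool keyed by buffer size (twice the configured write-buffer size), so idle connections stop pinning their own allocation. A standalone sketch of that pattern, illustrative only and not the grpc-go internals:

package main

import (
	"fmt"
	"sync"
)

var (
	poolsMu sync.Mutex
	pools   = make(map[int]*sync.Pool) // one pool per buffer size
)

// poolFor returns a shared pool of byte slices of the given size, creating it on first use.
func poolFor(size int) *sync.Pool {
	poolsMu.Lock()
	defer poolsMu.Unlock()
	if p, ok := pools[size]; ok {
		return p
	}
	p := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b // a pointer is stored to avoid an extra allocation on Put
	}}
	pools[size] = p
	return p
}

func main() {
	p := poolFor(64 * 1024)
	buf := p.Get().(*[]byte) // borrow a buffer for one flush
	fmt.Println(len(*buf))
	p.Put(buf) // hand it back for reuse by another connection
}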
@@ -43,10 +43,6 @@ import (
|
||||||
"google.golang.org/grpc/tap"
|
"google.golang.org/grpc/tap"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrNoHeaders is used as a signal that a trailers only response was received,
|
|
||||||
// and is not a real error.
|
|
||||||
var ErrNoHeaders = errors.New("stream has no headers")
|
|
||||||
|
|
||||||
const logLevel = 2
|
const logLevel = 2
|
||||||
|
|
||||||
type bufferPool struct {
|
type bufferPool struct {
|
||||||
|
@@ -56,7 +52,7 @@ type bufferPool struct {
|
||||||
func newBufferPool() *bufferPool {
|
func newBufferPool() *bufferPool {
|
||||||
return &bufferPool{
|
return &bufferPool{
|
||||||
pool: sync.Pool{
|
pool: sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() any {
|
||||||
return new(bytes.Buffer)
|
return new(bytes.Buffer)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) {
|
||||||
}
|
}
|
||||||
s.waitOnHeader()
|
s.waitOnHeader()
|
||||||
|
|
||||||
if !s.headerValid {
|
if !s.headerValid || s.noHeaders {
|
||||||
return nil, s.status.Err()
|
return nil, s.status.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.noHeaders {
|
|
||||||
return nil, ErrNoHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.header.Copy(), nil
|
return s.header.Copy(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
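With the ErrNoHeaders sentinel removed above, a trailers-only response now surfaces through Header() as the stream's status error rather than a dedicated error value. A hedged caller-side sketch (assumes the public google.golang.org/grpc and metadata packages; stream stands for any grpc.ClientStream):

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func headersOrStatus(stream grpc.ClientStream) (metadata.MD, error) {
	// md is nil and err carries the RPC status when the server replied
	// trailers-only; no special sentinel needs to be handled anymore.
	md, err := stream.Header()
	if err != nil {
		return nil, err
	}
	return md, nil
}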
@@ -559,6 +551,7 @@ type ServerConfig struct {
|
||||||
InitialConnWindowSize int32
|
InitialConnWindowSize int32
|
||||||
WriteBufferSize int
|
WriteBufferSize int
|
||||||
ReadBufferSize int
|
ReadBufferSize int
|
||||||
|
SharedWriteBuffer bool
|
||||||
ChannelzParentID *channelz.Identifier
|
ChannelzParentID *channelz.Identifier
|
||||||
MaxHeaderListSize *uint32
|
MaxHeaderListSize *uint32
|
||||||
HeaderTableSize *uint32
|
HeaderTableSize *uint32
|
||||||
|
@@ -592,6 +585,8 @@ type ConnectOptions struct {
|
||||||
WriteBufferSize int
|
WriteBufferSize int
|
||||||
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
|
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
|
||||||
ReadBufferSize int
|
ReadBufferSize int
|
||||||
|
// SharedWriteBuffer indicates whether connections should reuse write buffer
|
||||||
|
SharedWriteBuffer bool
|
||||||
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
|
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
|
||||||
ChannelzParentID *channelz.Identifier
|
ChannelzParentID *channelz.Identifier
|
||||||
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
|
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
|
||||||
|
@@ -736,7 +731,7 @@ type ServerTransport interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// connectionErrorf creates an ConnectionError with the specified error description.
|
// connectionErrorf creates an ConnectionError with the specified error description.
|
||||||
func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
|
func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
|
||||||
return ConnectionError{
|
return ConnectionError{
|
||||||
Desc: fmt.Sprintf(format, a...),
|
Desc: fmt.Sprintf(format, a...),
|
||||||
temp: temp,
|
temp: temp,
|
||||||
|
|
|
@@ -28,21 +28,26 @@ import (
|
||||||
"google.golang.org/grpc/internal/channelz"
|
"google.golang.org/grpc/internal/channelz"
|
||||||
istatus "google.golang.org/grpc/internal/status"
|
istatus "google.golang.org/grpc/internal/status"
|
||||||
"google.golang.org/grpc/internal/transport"
|
"google.golang.org/grpc/internal/transport"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
||||||
// actions and unblock when there's a picker update.
|
// actions and unblock when there's a picker update.
|
||||||
type pickerWrapper struct {
|
type pickerWrapper struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
done bool
|
done bool
|
||||||
idle bool
|
idle bool
|
||||||
blockingCh chan struct{}
|
blockingCh chan struct{}
|
||||||
picker balancer.Picker
|
picker balancer.Picker
|
||||||
|
statsHandlers []stats.Handler // to record blocking picker calls
|
||||||
}
|
}
|
||||||
|
|
||||||
func newPickerWrapper() *pickerWrapper {
|
func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
|
||||||
return &pickerWrapper{blockingCh: make(chan struct{})}
|
return &pickerWrapper{
|
||||||
|
blockingCh: make(chan struct{}),
|
||||||
|
statsHandlers: statsHandlers,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
||||||
|
@@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||||
var ch chan struct{}
|
var ch chan struct{}
|
||||||
|
|
||||||
var lastPickErr error
|
var lastPickErr error
|
||||||
|
|
||||||
for {
|
for {
|
||||||
pw.mu.Lock()
|
pw.mu.Lock()
|
||||||
if pw.done {
|
if pw.done {
|
||||||
|
@@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the channel is set, it means that the pick call had to wait for a
|
||||||
|
// new picker at some point. Either it's the first iteration and this
|
||||||
|
// function received the first picker, or a picker errored with
|
||||||
|
// ErrNoSubConnAvailable or errored with failfast set to false, which
|
||||||
|
// will trigger a continue to the next iteration. In the first case this
|
||||||
|
// conditional will hit if this call had to block (the channel is set).
|
||||||
|
// In the second case, the only way it will get to this conditional is
|
||||||
|
// if there is a new picker.
|
||||||
|
if ch != nil {
|
||||||
|
for _, sh := range pw.statsHandlers {
|
||||||
|
sh.HandleRPC(ctx, &stats.PickerUpdated{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
ch = pw.blockingCh
|
ch = pw.blockingCh
|
||||||
p := pw.picker
|
p := pw.picker
|
||||||
pw.mu.Unlock()
|
pw.mu.Unlock()
|
||||||
|
|
|
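The pickerWrapper change above reports picks that had to block for a picker update to registered stats handlers as *stats.PickerUpdated. A hedged sketch of a handler that merely logs those events (all other stats callbacks are no-ops); it would be installed with grpc.WithStatsHandler:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

type pickObserver struct{}

func (pickObserver) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (pickObserver) HandleRPC(_ context.Context, s stats.RPCStats) {
	if _, ok := s.(*stats.PickerUpdated); ok {
		log.Println("RPC waited for a picker update before being dispatched")
	}
}

func (pickObserver) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (pickObserver) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	var _ stats.Handler = pickObserver{} // compile-time interface check
}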
@@ -26,12 +26,18 @@ import (
|
||||||
"google.golang.org/grpc/balancer"
|
"google.golang.org/grpc/balancer"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/internal/envconfig"
|
"google.golang.org/grpc/internal/envconfig"
|
||||||
|
internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
||||||
"google.golang.org/grpc/internal/grpcrand"
|
"google.golang.org/grpc/internal/grpcrand"
|
||||||
|
"google.golang.org/grpc/internal/pretty"
|
||||||
|
"google.golang.org/grpc/resolver"
|
||||||
"google.golang.org/grpc/serviceconfig"
|
"google.golang.org/grpc/serviceconfig"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
const (
|
||||||
const PickFirstBalancerName = "pick_first"
|
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||||
|
PickFirstBalancerName = "pick_first"
|
||||||
|
logPrefix = "[pick-first-lb %p] "
|
||||||
|
)
|
||||||
|
|
||||||
func newPickfirstBuilder() balancer.Builder {
|
func newPickfirstBuilder() balancer.Builder {
|
||||||
return &pickfirstBuilder{}
|
return &pickfirstBuilder{}
|
||||||
|
@@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder {
|
||||||
type pickfirstBuilder struct{}
|
type pickfirstBuilder struct{}
|
||||||
|
|
||||||
func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||||
return &pickfirstBalancer{cc: cc}
|
b := &pickfirstBalancer{cc: cc}
|
||||||
|
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
|
||||||
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*pickfirstBuilder) Name() string {
|
func (*pickfirstBuilder) Name() string {
|
||||||
|
@@ -57,23 +65,36 @@ type pfConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
|
func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
|
||||||
cfg := &pfConfig{}
|
if !envconfig.PickFirstLBConfig {
|
||||||
if err := json.Unmarshal(js, cfg); err != nil {
|
// Prior to supporting loadbalancing configuration, the pick_first LB
|
||||||
|
// policy did not implement the balancer.ConfigParser interface. This
|
||||||
|
// meant that if a non-empty configuration was passed to it, the service
|
||||||
|
// config unmarshaling code would throw a warning log, but would
|
||||||
|
// continue using the pick_first LB policy. The code below ensures the
|
||||||
|
// same behavior is retained if the env var is not set.
|
||||||
|
if string(js) != "{}" {
|
||||||
|
logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cfg pfConfig
|
||||||
|
if err := json.Unmarshal(js, &cfg); err != nil {
|
||||||
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
|
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
|
||||||
}
|
}
|
||||||
return cfg, nil
|
return cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type pickfirstBalancer struct {
|
type pickfirstBalancer struct {
|
||||||
|
logger *internalgrpclog.PrefixLogger
|
||||||
state connectivity.State
|
state connectivity.State
|
||||||
cc balancer.ClientConn
|
cc balancer.ClientConn
|
||||||
subConn balancer.SubConn
|
subConn balancer.SubConn
|
||||||
cfg *pfConfig
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||||
if logger.V(2) {
|
if b.logger.V(2) {
|
||||||
logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err)
|
b.logger.Infof("Received error from the name resolver: %v", err)
|
||||||
}
|
}
|
||||||
if b.subConn == nil {
|
if b.subConn == nil {
|
||||||
b.state = connectivity.TransientFailure
|
b.state = connectivity.TransientFailure
|
||||||
|
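When the pick_first LB-config guard is enabled, the ParseConfig path above lets a client request address shuffling through service config. A hedged sketch; the JSON field name follows the pfConfig struct tag and the guard is an environment variable (assumed to be GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG), neither of which is spelled out in this hunk:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialWithShuffledPickFirst(target string) (*grpc.ClientConn, error) {
	const svcCfg = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(svcCfg),
	)
}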
@@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
||||||
// The resolver reported an empty address list. Treat it like an error by
|
// The resolver reported an empty address list. Treat it like an error by
|
||||||
// calling b.ResolverError.
|
// calling b.ResolverError.
|
||||||
if b.subConn != nil {
|
if b.subConn != nil {
|
||||||
// Remove the old subConn. All addresses were removed, so it is no longer
|
// Shut down the old subConn. All addresses were removed, so it is
|
||||||
// valid.
|
// no longer valid.
|
||||||
b.cc.RemoveSubConn(b.subConn)
|
b.subConn.Shutdown()
|
||||||
b.subConn = nil
|
b.subConn = nil
|
||||||
}
|
}
|
||||||
b.ResolverError(errors.New("produced zero addresses"))
|
b.ResolverError(errors.New("produced zero addresses"))
|
||||||
return balancer.ErrBadResolverState
|
return balancer.ErrBadResolverState
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.BalancerConfig != nil {
|
// We don't have to guard this block with the env var because ParseConfig
|
||||||
cfg, ok := state.BalancerConfig.(*pfConfig)
|
// already does so.
|
||||||
if !ok {
|
cfg, ok := state.BalancerConfig.(pfConfig)
|
||||||
return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
|
if state.BalancerConfig != nil && !ok {
|
||||||
}
|
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
|
||||||
b.cfg = cfg
|
|
||||||
}
|
}
|
||||||
|
if cfg.ShuffleAddressList {
|
||||||
if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList {
|
addrs = append([]resolver.Address{}, addrs...)
|
||||||
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
|
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if b.logger.V(2) {
|
||||||
|
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
|
||||||
|
}
|
||||||
|
|
||||||
if b.subConn != nil {
|
if b.subConn != nil {
|
||||||
b.cc.UpdateAddresses(b.subConn, addrs)
|
b.cc.UpdateAddresses(b.subConn, addrs)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
var subConn balancer.SubConn
|
||||||
|
subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
|
||||||
|
StateListener: func(state balancer.SubConnState) {
|
||||||
|
b.updateSubConnState(subConn, state)
|
||||||
|
},
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if logger.V(2) {
|
if b.logger.V(2) {
|
||||||
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
b.logger.Infof("Failed to create new SubConn: %v", err)
|
||||||
}
|
}
|
||||||
b.state = connectivity.TransientFailure
|
b.state = connectivity.TransientFailure
|
||||||
b.cc.UpdateState(balancer.State{
|
b.cc.UpdateState(balancer.State{
|
||||||
|
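The hunk above also moves pick_first to the newer SubConn notification model: a StateListener supplied in NewSubConnOptions instead of the balancer-wide UpdateSubConnState callback. A minimal sketch of that pattern as any custom balancer might use it (cc is the balancer.ClientConn handed to Build):

package example

import (
	"log"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

func newWatchedSubConn(cc balancer.ClientConn, addrs []resolver.Address) (balancer.SubConn, error) {
	var sc balancer.SubConn
	sc, err := cc.NewSubConn(addrs, balancer.NewSubConnOptions{
		// Invoked for every connectivity change of this particular SubConn.
		StateListener: func(scs balancer.SubConnState) {
			log.Printf("SubConn %p is now %v", sc, scs.ConnectivityState)
		},
	})
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}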
@@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateSubConnState is unused as a StateListener is always registered when
|
||||||
|
// creating SubConns.
|
||||||
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
|
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
|
||||||
if logger.V(2) {
|
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
|
||||||
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state)
|
}
|
||||||
|
|
||||||
|
func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
|
||||||
|
if b.logger.V(2) {
|
||||||
|
b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
|
||||||
}
|
}
|
||||||
if b.subConn != subConn {
|
if b.subConn != subConn {
|
||||||
if logger.V(2) {
|
if b.logger.V(2) {
|
||||||
logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized")
|
b.logger.Infof("Ignored state change because subConn is not recognized")
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -37,7 +37,7 @@ type PreparedMsg struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encode marshalls and compresses the message using the codec and compressor for the stream.
|
// Encode marshalls and compresses the message using the codec and compressor for the stream.
|
||||||
func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
|
func (p *PreparedMsg) Encode(s Stream, msg any) error {
|
||||||
ctx := s.Context()
|
ctx := s.Context()
|
||||||
rpcInfo, ok := rpcInfoFromContext(ctx)
|
rpcInfo, ok := rpcInfoFromContext(ctx)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
|
|
@@ -20,7 +20,7 @@ package resolver
|
||||||
|
|
||||||
type addressMapEntry struct {
|
type addressMapEntry struct {
|
||||||
addr Address
|
addr Address
|
||||||
value interface{}
|
value any
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddressMap is a map of addresses to arbitrary values taking into account
|
// AddressMap is a map of addresses to arbitrary values taking into account
|
||||||
|
@@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the value for the address in the map, if present.
|
// Get returns the value for the address in the map, if present.
|
||||||
func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
|
func (a *AddressMap) Get(addr Address) (value any, ok bool) {
|
||||||
addrKey := toMapKey(&addr)
|
addrKey := toMapKey(&addr)
|
||||||
entryList := a.m[addrKey]
|
entryList := a.m[addrKey]
|
||||||
if entry := entryList.find(addr); entry != -1 {
|
if entry := entryList.find(addr); entry != -1 {
|
||||||
|
@@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set updates or adds the value to the address in the map.
|
// Set updates or adds the value to the address in the map.
|
||||||
func (a *AddressMap) Set(addr Address, value interface{}) {
|
func (a *AddressMap) Set(addr Address, value any) {
|
||||||
addrKey := toMapKey(&addr)
|
addrKey := toMapKey(&addr)
|
||||||
entryList := a.m[addrKey]
|
entryList := a.m[addrKey]
|
||||||
if entry := entryList.find(addr); entry != -1 {
|
if entry := entryList.find(addr); entry != -1 {
|
||||||
|
@@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Values returns a slice of all current map values.
|
// Values returns a slice of all current map values.
|
||||||
func (a *AddressMap) Values() []interface{} {
|
func (a *AddressMap) Values() []any {
|
||||||
ret := make([]interface{}, 0, a.Len())
|
ret := make([]any, 0, a.Len())
|
||||||
for _, entryList := range a.m {
|
for _, entryList := range a.m {
|
||||||
for _, entry := range entryList {
|
for _, entry := range entryList {
|
||||||
ret = append(ret, entry.value)
|
ret = append(ret, entry.value)
|
||||||
|
|
|
@@ -77,25 +77,6 @@ func GetDefaultScheme() string {
|
||||||
return defaultScheme
|
return defaultScheme
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddressType indicates the address type returned by name resolution.
|
|
||||||
//
|
|
||||||
// Deprecated: use Attributes in Address instead.
|
|
||||||
type AddressType uint8
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Backend indicates the address is for a backend server.
|
|
||||||
//
|
|
||||||
// Deprecated: use Attributes in Address instead.
|
|
||||||
Backend AddressType = iota
|
|
||||||
// GRPCLB indicates the address is for a grpclb load balancer.
|
|
||||||
//
|
|
||||||
// Deprecated: to select the GRPCLB load balancing policy, use a service
|
|
||||||
// config with a corresponding loadBalancingConfig. To supply balancer
|
|
||||||
// addresses to the GRPCLB load balancing policy, set State.Attributes
|
|
||||||
// using balancer/grpclb/state.Set.
|
|
||||||
GRPCLB
|
|
||||||
)
|
|
||||||
|
|
||||||
// Address represents a server the client connects to.
|
// Address represents a server the client connects to.
|
||||||
//
|
//
|
||||||
// # Experimental
|
// # Experimental
|
||||||
|
@@ -111,9 +92,6 @@ type Address struct {
|
||||||
// the address, instead of the hostname from the Dial target string. In most cases,
|
// the address, instead of the hostname from the Dial target string. In most cases,
|
||||||
// this should not be set.
|
// this should not be set.
|
||||||
//
|
//
|
||||||
// If Type is GRPCLB, ServerName should be the name of the remote load
|
|
||||||
// balancer, not the name of the backend.
|
|
||||||
//
|
|
||||||
// WARNING: ServerName must only be populated with trusted values. It
|
// WARNING: ServerName must only be populated with trusted values. It
|
||||||
// is insecure to populate it with data from untrusted inputs since untrusted
|
// is insecure to populate it with data from untrusted inputs since untrusted
|
||||||
// values could be used to bypass the authority checks performed by TLS.
|
// values could be used to bypass the authority checks performed by TLS.
|
||||||
|
@@ -126,27 +104,29 @@ type Address struct {
|
||||||
// BalancerAttributes contains arbitrary data about this address intended
|
// BalancerAttributes contains arbitrary data about this address intended
|
||||||
// for consumption by the LB policy. These attributes do not affect SubConn
|
// for consumption by the LB policy. These attributes do not affect SubConn
|
||||||
// creation, connection establishment, handshaking, etc.
|
// creation, connection establishment, handshaking, etc.
|
||||||
BalancerAttributes *attributes.Attributes
|
|
||||||
|
|
||||||
// Type is the type of this address.
|
|
||||||
//
|
//
|
||||||
// Deprecated: use Attributes instead.
|
// Deprecated: when an Address is inside an Endpoint, this field should not
|
||||||
Type AddressType
|
// be used, and it will eventually be removed entirely.
|
||||||
|
BalancerAttributes *attributes.Attributes
|
||||||
|
|
||||||
// Metadata is the information associated with Addr, which may be used
|
// Metadata is the information associated with Addr, which may be used
|
||||||
// to make load balancing decision.
|
// to make load balancing decision.
|
||||||
//
|
//
|
||||||
// Deprecated: use Attributes instead.
|
// Deprecated: use Attributes instead.
|
||||||
Metadata interface{}
|
Metadata any
|
||||||
}
|
}
|
||||||
|
|
||||||
// Equal returns whether a and o are identical. Metadata is compared directly,
|
// Equal returns whether a and o are identical. Metadata is compared directly,
|
||||||
// not with any recursive introspection.
|
// not with any recursive introspection.
|
||||||
|
//
|
||||||
|
// This method compares all fields of the address. When used to tell apart
|
||||||
|
// addresses during subchannel creation or connection establishment, it might be
|
||||||
|
// more appropriate for the caller to implement custom equality logic.
|
||||||
func (a Address) Equal(o Address) bool {
|
func (a Address) Equal(o Address) bool {
|
||||||
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
|
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
|
||||||
a.Attributes.Equal(o.Attributes) &&
|
a.Attributes.Equal(o.Attributes) &&
|
||||||
a.BalancerAttributes.Equal(o.BalancerAttributes) &&
|
a.BalancerAttributes.Equal(o.BalancerAttributes) &&
|
||||||
a.Type == o.Type && a.Metadata == o.Metadata
|
a.Metadata == o.Metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns JSON formatted string representation of the address.
|
// String returns JSON formatted string representation of the address.
|
||||||
|
@@ -190,11 +170,37 @@ type BuildOptions struct {
|
||||||
Dialer func(context.Context, string) (net.Conn, error)
|
Dialer func(context.Context, string) (net.Conn, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// An Endpoint is one network endpoint, or server, which may have multiple
|
||||||
|
// addresses with which it can be accessed.
|
||||||
|
type Endpoint struct {
|
||||||
|
// Addresses contains a list of addresses used to access this endpoint.
|
||||||
|
Addresses []Address
|
||||||
|
|
||||||
|
// Attributes contains arbitrary data about this endpoint intended for
|
||||||
|
// consumption by the LB policy.
|
||||||
|
Attributes *attributes.Attributes
|
||||||
|
}
|
||||||
|
|
||||||
// State contains the current Resolver state relevant to the ClientConn.
|
// State contains the current Resolver state relevant to the ClientConn.
|
||||||
type State struct {
|
type State struct {
|
||||||
// Addresses is the latest set of resolved addresses for the target.
|
// Addresses is the latest set of resolved addresses for the target.
|
||||||
|
//
|
||||||
|
// If a resolver sets Addresses but does not set Endpoints, one Endpoint
|
||||||
|
// will be created for each Address before the State is passed to the LB
|
||||||
|
// policy. The BalancerAttributes of each entry in Addresses will be set
|
||||||
|
// in Endpoints.Attributes, and be cleared in the Endpoint's Address's
|
||||||
|
// BalancerAttributes.
|
||||||
|
//
|
||||||
|
// Soon, Addresses will be deprecated and replaced fully by Endpoints.
|
||||||
Addresses []Address
|
Addresses []Address
|
||||||
|
|
||||||
|
// Endpoints is the latest set of resolved endpoints for the target.
|
||||||
|
//
|
||||||
|
// If a resolver produces a State containing Endpoints but not Addresses,
|
||||||
|
// it must take care to ensure the LB policies it selects will support
|
||||||
|
// Endpoints.
|
||||||
|
Endpoints []Endpoint
|
||||||
|
|
||||||
// ServiceConfig contains the result from parsing the latest service
|
// ServiceConfig contains the result from parsing the latest service
|
||||||
// config. If it is nil, it indicates no service config is present or the
|
// config. If it is nil, it indicates no service config is present or the
|
||||||
// resolver does not provide service configs.
|
// resolver does not provide service configs.
|
||||||
|
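Endpoints becomes the grouping unit for resolved servers: one Endpoint per server, possibly reachable on several addresses. A hedged sketch of a resolver pushing an endpoint-based update (cc is the resolver.ClientConn received in Build; the addresses are placeholders); as the comments above note, a resolver that sets only Endpoints must make sure the selected LB policy supports them:

package example

import "google.golang.org/grpc/resolver"

func pushEndpoints(cc resolver.ClientConn) error {
	return cc.UpdateState(resolver.State{
		Endpoints: []resolver.Endpoint{
			// One server reachable on two addresses.
			{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "[fd00::1]:443"}}},
			// A second, single-address server.
			{Addresses: []resolver.Address{{Addr: "10.0.0.2:443"}}},
		},
	})
}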
@@ -254,20 +260,7 @@ type ClientConn interface {
|
||||||
// target does not contain a scheme or if the parsed scheme is not registered
|
// target does not contain a scheme or if the parsed scheme is not registered
|
||||||
// (i.e. no corresponding resolver available to resolve the endpoint), we will
|
// (i.e. no corresponding resolver available to resolve the endpoint), we will
|
||||||
// apply the default scheme, and will attempt to reparse it.
|
// apply the default scheme, and will attempt to reparse it.
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
//
|
|
||||||
// - "dns://some_authority/foo.bar"
|
|
||||||
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
|
||||||
// - "foo.bar"
|
|
||||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
|
||||||
// - "unknown_scheme://authority/endpoint"
|
|
||||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
|
||||||
type Target struct {
|
type Target struct {
|
||||||
// Deprecated: use URL.Scheme instead.
|
|
||||||
Scheme string
|
|
||||||
// Deprecated: use URL.Host instead.
|
|
||||||
Authority string
|
|
||||||
// URL contains the parsed dial target with an optional default scheme added
|
// URL contains the parsed dial target with an optional default scheme added
|
||||||
// to it if the original dial target contained no scheme or contained an
|
// to it if the original dial target contained no scheme or contained an
|
||||||
// unregistered scheme. Any query params specified in the original dial
|
// unregistered scheme. Any query params specified in the original dial
|
||||||
|
@@ -321,10 +314,3 @@ type Resolver interface {
|
||||||
// Close closes the resolver.
|
// Close closes the resolver.
|
||||||
Close()
|
Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnregisterForTesting removes the resolver builder with the given scheme from the
|
|
||||||
// resolver map.
|
|
||||||
// This function is for testing only.
|
|
||||||
func UnregisterForTesting(scheme string) {
|
|
||||||
delete(m, scheme)
|
|
||||||
}
|
|
||||||
|
|
|
@@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() {
|
||||||
ccr.mu.Unlock()
|
ccr.mu.Unlock()
|
||||||
|
|
||||||
// Give enqueued callbacks a chance to finish.
|
// Give enqueued callbacks a chance to finish.
|
||||||
<-ccr.serializer.Done
|
<-ccr.serializer.Done()
|
||||||
|
|
||||||
// Spawn a goroutine to close the resolver (since it may block trying to
|
// Spawn a goroutine to close the resolver (since it may block trying to
|
||||||
// cleanup all allocated resources) and return early.
|
// cleanup all allocated resources) and return early.
|
||||||
|
@@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context))
|
||||||
// which includes addresses and service config.
|
// which includes addresses and service config.
|
||||||
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
|
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
|
||||||
errCh := make(chan error, 1)
|
errCh := make(chan error, 1)
|
||||||
|
if s.Endpoints == nil {
|
||||||
|
s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
|
||||||
|
for _, a := range s.Addresses {
|
||||||
|
ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
|
||||||
|
ep.Addresses[0].BalancerAttributes = nil
|
||||||
|
s.Endpoints = append(s.Endpoints, ep)
|
||||||
|
}
|
||||||
|
}
|
||||||
ok := ccr.serializer.Schedule(func(context.Context) {
|
ok := ccr.serializer.Schedule(func(context.Context) {
|
||||||
ccr.addChannelzTraceEvent(s)
|
ccr.addChannelzTraceEvent(s)
|
||||||
ccr.curState = s
|
ccr.curState = s
|
||||||
|
|
|
@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
|
||||||
}
|
}
|
||||||
return &gzipCompressor{
|
return &gzipCompressor{
|
||||||
pool: sync.Pool{
|
pool: sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() any {
|
||||||
w, err := gzip.NewWriterLevel(io.Discard, level)
|
w, err := gzip.NewWriterLevel(io.Discard, level)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
@@ -577,6 +577,9 @@ type parser struct {
|
||||||
// The header of a gRPC message. Find more detail at
|
// The header of a gRPC message. Find more detail at
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
||||||
header [5]byte
|
header [5]byte
|
||||||
|
|
||||||
|
// recvBufferPool is the pool of shared receive buffers.
|
||||||
|
recvBufferPool SharedBufferPool
|
||||||
}
|
}
|
||||||
|
|
||||||
// recvMsg reads a complete gRPC message from the stream.
|
// recvMsg reads a complete gRPC message from the stream.
|
||||||
|
@@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
||||||
if int(length) > maxReceiveMessageSize {
|
if int(length) > maxReceiveMessageSize {
|
||||||
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
|
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
|
||||||
}
|
}
|
||||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
msg = p.recvBufferPool.Get(int(length))
|
||||||
// of making it for each message:
|
|
||||||
msg = make([]byte, int(length))
|
|
||||||
if _, err := p.r.Read(msg); err != nil {
|
if _, err := p.r.Read(msg); err != nil {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
err = io.ErrUnexpectedEOF
|
err = io.ErrUnexpectedEOF
|
||||||
|
@@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
||||||
// encode serializes msg and returns a buffer containing the message, or an
|
// encode serializes msg and returns a buffer containing the message, or an
|
||||||
// error if it is too large to be transmitted by grpc. If msg is nil, it
|
// error if it is too large to be transmitted by grpc. If msg is nil, it
|
||||||
// generates an empty message.
|
// generates an empty message.
|
||||||
func encode(c baseCodec, msg interface{}) ([]byte, error) {
|
func encode(c baseCodec, msg any) ([]byte, error) {
|
||||||
if msg == nil { // NOTE: typed nils will not be caught by this check
|
if msg == nil { // NOTE: typed nils will not be caught by this check
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
@@ -692,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
|
||||||
return hdr, data
|
return hdr, data
|
||||||
}
|
}
|
||||||
|
|
||||||
func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
|
func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
|
||||||
return &stats.OutPayload{
|
return &stats.OutPayload{
|
||||||
Client: client,
|
Client: client,
|
||||||
Payload: msg,
|
Payload: msg,
|
||||||
|
@@ -726,12 +727,12 @@ type payloadInfo struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
|
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
|
||||||
pf, d, err := p.recvMsg(maxReceiveMessageSize)
|
pf, buf, err := p.recvMsg(maxReceiveMessageSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if payInfo != nil {
|
if payInfo != nil {
|
||||||
payInfo.compressedLength = len(d)
|
payInfo.compressedLength = len(buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
||||||
|
@@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||||||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||||
// use this decompressor as the default.
|
// use this decompressor as the default.
|
||||||
if dc != nil {
|
if dc != nil {
|
||||||
d, err = dc.Do(bytes.NewReader(d))
|
buf, err = dc.Do(bytes.NewReader(buf))
|
||||||
size = len(d)
|
size = len(buf)
|
||||||
} else {
|
} else {
|
||||||
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
|
buf, size, err = decompress(compressor, buf, maxReceiveMessageSize)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
||||||
|
@@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return d, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Using compressor, decompress d, returning data and size.
|
// Using compressor, decompress d, returning data and size.
|
||||||
|
@@ -791,16 +792,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
|
||||||
// For the two compressor parameters, both should not be set, but if they are,
|
// For the two compressor parameters, both should not be set, but if they are,
|
||||||
// dc takes precedence over compressor.
|
// dc takes precedence over compressor.
|
||||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||||
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
|
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
|
||||||
d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := c.Unmarshal(d, m); err != nil {
|
if err := c.Unmarshal(buf, m); err != nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
|
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
|
||||||
}
|
}
|
||||||
if payInfo != nil {
|
if payInfo != nil {
|
||||||
payInfo.uncompressedBytes = d
|
payInfo.uncompressedBytes = buf
|
||||||
|
} else {
|
||||||
|
p.recvBufferPool.Put(&buf)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
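recvMsg and recv above now borrow the receive buffer from p.recvBufferPool and return it once the message has been unmarshaled (unless the payload is retained for stats). The pool contract visible here is Get(length) []byte and Put(*[]byte); grpc.NewSharedBufferPool is the stock implementation, but a deliberately naive one looks like this (illustrative sketch only; the real pool buckets buffers by size):

package main

import "sync"

type naivePool struct{ p sync.Pool }

func (n *naivePool) Get(length int) []byte {
	if b, ok := n.p.Get().(*[]byte); ok && cap(*b) >= length {
		return (*b)[:length]
	}
	return make([]byte, length)
}

func (n *naivePool) Put(b *[]byte) { n.p.Put(b) }

func main() {
	var pool naivePool
	buf := pool.Get(4096) // sized for one gRPC message payload
	pool.Put(&buf)        // returned after decoding
}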
@@ -860,19 +863,22 @@ func ErrorDesc(err error) string {
|
||||||
// Errorf returns nil if c is OK.
|
// Errorf returns nil if c is OK.
|
||||||
//
|
//
|
||||||
// Deprecated: use status.Errorf instead.
|
// Deprecated: use status.Errorf instead.
|
||||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
func Errorf(c codes.Code, format string, a ...any) error {
|
||||||
return status.Errorf(c, format, a...)
|
return status.Errorf(c, format, a...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error())
|
||||||
|
var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
|
||||||
|
|
||||||
// toRPCErr converts an error into an error from the status package.
|
// toRPCErr converts an error into an error from the status package.
|
||||||
func toRPCErr(err error) error {
|
func toRPCErr(err error) error {
|
||||||
switch err {
|
switch err {
|
||||||
case nil, io.EOF:
|
case nil, io.EOF:
|
||||||
return err
|
return err
|
||||||
case context.DeadlineExceeded:
|
case context.DeadlineExceeded:
|
||||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
return errContextDeadline
|
||||||
case context.Canceled:
|
case context.Canceled:
|
||||||
return status.Error(codes.Canceled, err.Error())
|
return errContextCanceled
|
||||||
case io.ErrUnexpectedEOF:
|
case io.ErrUnexpectedEOF:
|
||||||
return status.Error(codes.Internal, err.Error())
|
return status.Error(codes.Internal, err.Error())
|
||||||
}
|
}
|
||||||
|
|
|
@@ -86,7 +86,7 @@ func init() {
|
||||||
var statusOK = status.New(codes.OK, "")
|
var statusOK = status.New(codes.OK, "")
|
||||||
var logger = grpclog.Component("core")
|
var logger = grpclog.Component("core")
|
||||||
|
|
||||||
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
|
type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
|
||||||
|
|
||||||
// MethodDesc represents an RPC service's method specification.
|
// MethodDesc represents an RPC service's method specification.
|
||||||
type MethodDesc struct {
|
type MethodDesc struct {
|
||||||
|
@@ -99,20 +99,20 @@ type ServiceDesc struct {
|
||||||
ServiceName string
|
ServiceName string
|
||||||
// The pointer to the service interface. Used to check whether the user
|
// The pointer to the service interface. Used to check whether the user
|
||||||
// provided implementation satisfies the interface requirements.
|
// provided implementation satisfies the interface requirements.
|
||||||
HandlerType interface{}
|
HandlerType any
|
||||||
Methods []MethodDesc
|
Methods []MethodDesc
|
||||||
Streams []StreamDesc
|
Streams []StreamDesc
|
||||||
Metadata interface{}
|
Metadata any
|
||||||
}
|
}
|
||||||
|
|
||||||
// serviceInfo wraps information about a service. It is very similar to
|
// serviceInfo wraps information about a service. It is very similar to
|
||||||
// ServiceDesc and is constructed from it for internal purposes.
|
// ServiceDesc and is constructed from it for internal purposes.
|
||||||
type serviceInfo struct {
|
type serviceInfo struct {
|
||||||
// Contains the implementation for the methods in this service.
|
// Contains the implementation for the methods in this service.
|
||||||
serviceImpl interface{}
|
serviceImpl any
|
||||||
methods map[string]*MethodDesc
|
methods map[string]*MethodDesc
|
||||||
streams map[string]*StreamDesc
|
streams map[string]*StreamDesc
|
||||||
mdata interface{}
|
mdata any
|
||||||
}
|
}
|
||||||
|
|
||||||
// Server is a gRPC server to serve RPC requests.
|
// Server is a gRPC server to serve RPC requests.
|
||||||
|
@@ -164,10 +164,12 @@ type serverOptions struct {
|
||||||
initialConnWindowSize int32
|
initialConnWindowSize int32
|
||||||
writeBufferSize int
|
writeBufferSize int
|
||||||
readBufferSize int
|
readBufferSize int
|
||||||
|
sharedWriteBuffer bool
|
||||||
connectionTimeout time.Duration
|
connectionTimeout time.Duration
|
||||||
maxHeaderListSize *uint32
|
maxHeaderListSize *uint32
|
||||||
headerTableSize *uint32
|
headerTableSize *uint32
|
||||||
numServerWorkers uint32
|
numServerWorkers uint32
|
||||||
|
recvBufferPool SharedBufferPool
|
||||||
}
|
}
|
||||||
|
|
||||||
var defaultServerOptions = serverOptions{
|
var defaultServerOptions = serverOptions{
|
||||||
|
@@ -177,6 +179,7 @@ var defaultServerOptions = serverOptions{
|
||||||
connectionTimeout: 120 * time.Second,
|
connectionTimeout: 120 * time.Second,
|
||||||
writeBufferSize: defaultWriteBufSize,
|
writeBufferSize: defaultWriteBufSize,
|
||||||
readBufferSize: defaultReadBufSize,
|
readBufferSize: defaultReadBufSize,
|
||||||
|
recvBufferPool: nopBufferPool{},
|
||||||
}
|
}
|
||||||
var globalServerOptions []ServerOption
|
var globalServerOptions []ServerOption
|
||||||
|
|
||||||
|
@@ -228,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption {
|
||||||
return &joinServerOption{opts: opts}
|
return &joinServerOption{opts: opts}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SharedWriteBuffer allows reusing per-connection transport write buffer.
|
||||||
|
// If this option is set to true every connection will release the buffer after
|
||||||
|
// flushing the data on the wire.
|
||||||
|
//
|
||||||
|
// # Experimental
|
||||||
|
//
|
||||||
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||||
|
// later release.
|
||||||
|
func SharedWriteBuffer(val bool) ServerOption {
|
||||||
|
return newFuncServerOption(func(o *serverOptions) {
|
||||||
|
o.sharedWriteBuffer = val
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// WriteBufferSize determines how much data can be batched before doing a write
|
// WriteBufferSize determines how much data can be batched before doing a write
|
||||||
// on the wire. The corresponding memory allocation for this buffer will be
|
// on the wire. The corresponding memory allocation for this buffer will be
|
||||||
// twice the size to keep syscalls low. The default value for this buffer is
|
// twice the size to keep syscalls low. The default value for this buffer is
|
||||||
|
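SharedWriteBuffer above is the public switch for the pooled write buffers introduced earlier in this diff. A minimal server-side sketch (the option is experimental per its doc comment):

package example

import "google.golang.org/grpc"

func newServerWithSharedWriteBuffer() *grpc.Server {
	return grpc.NewServer(
		grpc.WriteBufferSize(32*1024), // pool entries are sized from this value
		grpc.SharedWriteBuffer(true),  // release the buffer back to the pool after each flush
	)
}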
@@ -268,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption {
|
||||||
|
|
||||||
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
|
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
|
||||||
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
|
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
|
||||||
if kp.Time > 0 && kp.Time < time.Second {
|
if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
|
||||||
logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
|
logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
|
||||||
kp.Time = time.Second
|
kp.Time = internal.KeepaliveMinServerPingTime
|
||||||
}
|
}
|
||||||
|
|
||||||
return newFuncServerOption(func(o *serverOptions) {
|
return newFuncServerOption(func(o *serverOptions) {
|
||||||
|
@@ -550,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RecvBufferPool returns a ServerOption that configures the server
|
||||||
|
// to use the provided shared buffer pool for parsing incoming messages. Depending
|
||||||
|
// on the application's workload, this could result in reduced memory allocation.
|
||||||
|
//
|
||||||
|
// If you are unsure about how to implement a memory pool but want to utilize one,
|
||||||
|
// begin with grpc.NewSharedBufferPool.
|
||||||
|
//
|
||||||
|
// Note: The shared buffer pool feature will not be active if any of the following
|
||||||
|
// options are used: StatsHandler, EnableTracing, or binary logging. In such
|
||||||
|
// cases, the shared buffer pool will be ignored.
|
||||||
|
//
|
||||||
|
// # Experimental
|
||||||
|
//
|
||||||
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||||
|
// later release.
|
||||||
|
func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
|
||||||
|
return newFuncServerOption(func(o *serverOptions) {
|
||||||
|
o.recvBufferPool = bufferPool
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// serverWorkerResetThreshold defines how often the stack must be reset. Every
|
// serverWorkerResetThreshold defines how often the stack must be reset. Every
|
||||||
// N requests, by spawning a new goroutine in its place, a worker can reset its
|
// N requests, by spawning a new goroutine in its place, a worker can reset its
|
||||||
// stack so that large stacks don't live in memory forever. 2^16 should allow
|
// stack so that large stacks don't live in memory forever. 2^16 should allow
|
||||||
|
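RecvBufferPool above is the matching receive-side switch, wiring a SharedBufferPool into each stream's parser. A minimal sketch using the stock pool named in the doc comment; as noted there, the option is ignored when stats handlers, tracing, or binary logging are in use:

package example

import "google.golang.org/grpc"

func newServerWithRecvBufferPool() *grpc.Server {
	return grpc.NewServer(
		grpc.RecvBufferPool(grpc.NewSharedBufferPool()), // reuse receive buffers across RPCs
	)
}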
@@ -625,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server {
|
||||||
|
|
||||||
// printf records an event in s's event log, unless s has been stopped.
|
// printf records an event in s's event log, unless s has been stopped.
|
||||||
// REQUIRES s.mu is held.
|
// REQUIRES s.mu is held.
|
||||||
func (s *Server) printf(format string, a ...interface{}) {
|
func (s *Server) printf(format string, a ...any) {
|
||||||
if s.events != nil {
|
if s.events != nil {
|
||||||
s.events.Printf(format, a...)
|
s.events.Printf(format, a...)
|
||||||
}
|
}
|
||||||
|
@@ -633,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) {
|
||||||
|
|
||||||
// errorf records an error in s's event log, unless s has been stopped.
|
// errorf records an error in s's event log, unless s has been stopped.
|
||||||
// REQUIRES s.mu is held.
|
// REQUIRES s.mu is held.
|
||||||
func (s *Server) errorf(format string, a ...interface{}) {
|
func (s *Server) errorf(format string, a ...any) {
|
||||||
if s.events != nil {
|
if s.events != nil {
|
||||||
s.events.Errorf(format, a...)
|
s.events.Errorf(format, a...)
|
||||||
}
|
}
|
||||||
|
@@ -648,14 +686,14 @@ type ServiceRegistrar interface {
|
||||||
// once the server has started serving.
|
// once the server has started serving.
|
||||||
// desc describes the service and its methods and handlers. impl is the
|
// desc describes the service and its methods and handlers. impl is the
|
||||||
// service implementation which is passed to the method handlers.
|
// service implementation which is passed to the method handlers.
|
||||||
RegisterService(desc *ServiceDesc, impl interface{})
|
RegisterService(desc *ServiceDesc, impl any)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterService registers a service and its implementation to the gRPC
|
// RegisterService registers a service and its implementation to the gRPC
|
||||||
// server. It is called from the IDL generated code. This must be called before
|
// server. It is called from the IDL generated code. This must be called before
|
||||||
// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
|
// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
|
||||||
// ensure it implements sd.HandlerType.
|
// ensure it implements sd.HandlerType.
|
||||||
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
|
func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
|
||||||
if ss != nil {
|
if ss != nil {
|
||||||
ht := reflect.TypeOf(sd.HandlerType).Elem()
|
ht := reflect.TypeOf(sd.HandlerType).Elem()
|
||||||
st := reflect.TypeOf(ss)
|
st := reflect.TypeOf(ss)
|
||||||
|
@@ -666,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
|
||||||
s.register(sd, ss)
|
s.register(sd, ss)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) register(sd *ServiceDesc, ss interface{}) {
|
func (s *Server) register(sd *ServiceDesc, ss any) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
s.printf("RegisterService(%q)", sd.ServiceName)
|
s.printf("RegisterService(%q)", sd.ServiceName)
|
||||||
|
@ -707,7 +745,7 @@ type MethodInfo struct {
|
||||||
type ServiceInfo struct {
|
type ServiceInfo struct {
|
||||||
Methods []MethodInfo
|
Methods []MethodInfo
|
||||||
// Metadata is the metadata specified in ServiceDesc when registering service.
|
// Metadata is the metadata specified in ServiceDesc when registering service.
|
||||||
Metadata interface{}
|
Metadata any
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetServiceInfo returns a map from service names to ServiceInfo.
|
// GetServiceInfo returns a map from service names to ServiceInfo.
|
||||||
|
@ -908,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
||||||
InitialConnWindowSize: s.opts.initialConnWindowSize,
|
InitialConnWindowSize: s.opts.initialConnWindowSize,
|
||||||
WriteBufferSize: s.opts.writeBufferSize,
|
WriteBufferSize: s.opts.writeBufferSize,
|
||||||
ReadBufferSize: s.opts.readBufferSize,
|
ReadBufferSize: s.opts.readBufferSize,
|
||||||
|
SharedWriteBuffer: s.opts.sharedWriteBuffer,
|
||||||
ChannelzParentID: s.channelzID,
|
ChannelzParentID: s.channelzID,
|
||||||
MaxHeaderListSize: s.opts.maxHeaderListSize,
|
MaxHeaderListSize: s.opts.maxHeaderListSize,
|
||||||
HeaderTableSize: s.opts.headerTableSize,
|
HeaderTableSize: s.opts.headerTableSize,
|
||||||
|
@ -1094,7 +1133,7 @@ func (s *Server) incrCallsFailed() {
|
||||||
atomic.AddInt64(&s.czData.callsFailed, 1)
|
atomic.AddInt64(&s.czData.callsFailed, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
|
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
|
||||||
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
|
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
|
channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
|
||||||
|
@ -1141,7 +1180,7 @@ func chainUnaryServerInterceptors(s *Server) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
|
func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
|
||||||
return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
|
return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
|
||||||
return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
|
return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1150,7 +1189,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
|
||||||
if curr == len(interceptors)-1 {
|
if curr == len(interceptors)-1 {
|
||||||
return finalHandler
|
return finalHandler
|
||||||
}
|
}
|
||||||
return func(ctx context.Context, req interface{}) (interface{}, error) {
|
return func(ctx context.Context, req any) (any, error) {
|
||||||
return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
|
return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
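The two helpers above are what back server-side interceptor chaining: interceptors[0] runs outermost and each handler argument wraps the remainder of the chain plus the registered method handler. A minimal sketch of how this is consumed through the public grpc.ChainUnaryInterceptor option; the logging and auth interceptors here are hypothetical and only illustrate ordering.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logging and auth are hypothetical interceptors used only to show chaining order.
func logging(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	log.Printf("-> %s", info.FullMethod)
	resp, err := handler(ctx, req) // runs the rest of the chain, then the method handler
	log.Printf("<- %s err=%v", info.FullMethod, err)
	return resp, err
}

func auth(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	// credential checks would go here
	return handler(ctx, req)
}

func main() {
	// logging is outermost, auth runs inside it, mirroring interceptors[0] above.
	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(logging, auth))
	defer srv.Stop()
}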
@@ -1187,7 +1226,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	defer func() {
 		if trInfo != nil {
 			if err != nil && err != io.EOF {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				trInfo.tr.SetError()
 			}
 			trInfo.tr.Finish()
@@ -1294,7 +1333,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
 	}
-	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+	d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 	if err != nil {
 		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
@@ -1304,7 +1343,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	if channelz.IsOn() {
 		t.IncrMsgRecv()
 	}
-	df := func(v interface{}) error {
+	df := func(v any) error {
 		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
@@ -1468,7 +1507,7 @@ func chainStreamServerInterceptors(s *Server) {
 }
 
 func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
-	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+	return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
 		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
 	}
 }
@@ -1477,7 +1516,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
 	if curr == len(interceptors)-1 {
 		return finalHandler
 	}
-	return func(srv interface{}, stream ServerStream) error {
+	return func(srv any, stream ServerStream) error {
 		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
 	}
 }
@@ -1504,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		ctx:                   ctx,
 		t:                     t,
 		s:                     stream,
-		p:                     &parser{r: stream},
+		p:                     &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
 		codec:                 s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
@@ -1518,7 +1557,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		if trInfo != nil {
 			ss.mu.Lock()
 			if err != nil && err != io.EOF {
-				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				ss.trInfo.tr.SetError()
 			}
 			ss.trInfo.tr.Finish()
@@ -1621,7 +1660,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 	}
 	var appErr error
-	var server interface{}
+	var server any
 	if info != nil {
 		server = info.serviceImpl
 	}
@@ -1687,13 +1726,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	pos := strings.LastIndex(sm, "/")
 	if pos == -1 {
 		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
 			trInfo.tr.SetError()
 		}
 		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
 		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
 			if trInfo != nil {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				trInfo.tr.SetError()
 			}
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
@@ -1734,7 +1773,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	}
 	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
 		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+			trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 			trInfo.tr.SetError()
 		}
 		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
@@ -2054,12 +2093,12 @@ func validateSendCompressor(name, clientCompressors string) error {
 // atomicSemaphore implements a blocking, counting semaphore. acquire should be
 // called synchronously; release may be called asynchronously.
 type atomicSemaphore struct {
-	n    int64
+	n    atomic.Int64
 	wait chan struct{}
 }
 
 func (q *atomicSemaphore) acquire() {
-	if atomic.AddInt64(&q.n, -1) < 0 {
+	if q.n.Add(-1) < 0 {
 		// We ran out of quota. Block until a release happens.
 		<-q.wait
 	}
@@ -2070,12 +2109,14 @@ func (q *atomicSemaphore) release() {
 	// concurrent calls to acquire, but also note that with synchronous calls to
 	// acquire, as our system does, n will never be less than -1. There are
 	// fairness issues (queuing) to consider if this was to be generalized.
-	if atomic.AddInt64(&q.n, 1) <= 0 {
+	if q.n.Add(1) <= 0 {
 		// An acquire was waiting on us. Unblock it.
 		q.wait <- struct{}{}
 	}
}
 
 func newHandlerQuota(n uint32) *atomicSemaphore {
-	return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
+	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+	a.n.Store(int64(n))
+	return a
 }
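The semaphore above now uses the typed atomic.Int64 introduced in Go 1.19 instead of a bare int64 driven through atomic.AddInt64. A small standalone sketch of the difference, using only the standard library:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style, as removed above: a plain int64 manipulated via package functions.
	var legacy int64
	atomic.AddInt64(&legacy, 1)

	// Go 1.19 typed atomics, as adopted above: the counter carries its own
	// methods and cannot be mixed with non-atomic access by accident.
	var n atomic.Int64
	n.Store(1)
	if n.Add(-1) < 0 {
		fmt.Println("out of quota")
	}
	fmt.Println(atomic.LoadInt64(&legacy), n.Load())
}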
@@ -0,0 +1,154 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import "sync"
+
+// SharedBufferPool is a pool of buffers that can be shared, resulting in
+// decreased memory allocation. Currently, in gRPC-go, it is only utilized
+// for parsing incoming messages.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type SharedBufferPool interface {
+	// Get returns a buffer with specified length from the pool.
+	//
+	// The returned byte slice may be not zero initialized.
+	Get(length int) []byte
+
+	// Put returns a buffer to the pool.
+	Put(*[]byte)
+}
+
+// NewSharedBufferPool creates a simple SharedBufferPool with buckets
+// of different sizes to optimize memory usage. This prevents the pool from
+// wasting large amounts of memory, even when handling messages of varying sizes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewSharedBufferPool() SharedBufferPool {
+	return &simpleSharedBufferPool{
+		pools: [poolArraySize]simpleSharedBufferChildPool{
+			newBytesPool(level0PoolMaxSize),
+			newBytesPool(level1PoolMaxSize),
+			newBytesPool(level2PoolMaxSize),
+			newBytesPool(level3PoolMaxSize),
+			newBytesPool(level4PoolMaxSize),
+			newBytesPool(0),
+		},
+	}
+}
+
+// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
+type simpleSharedBufferPool struct {
+	pools [poolArraySize]simpleSharedBufferChildPool
+}
+
+func (p *simpleSharedBufferPool) Get(size int) []byte {
+	return p.pools[p.poolIdx(size)].Get(size)
+}
+
+func (p *simpleSharedBufferPool) Put(bs *[]byte) {
+	p.pools[p.poolIdx(cap(*bs))].Put(bs)
+}
+
+func (p *simpleSharedBufferPool) poolIdx(size int) int {
+	switch {
+	case size <= level0PoolMaxSize:
+		return level0PoolIdx
+	case size <= level1PoolMaxSize:
+		return level1PoolIdx
+	case size <= level2PoolMaxSize:
+		return level2PoolIdx
+	case size <= level3PoolMaxSize:
+		return level3PoolIdx
+	case size <= level4PoolMaxSize:
+		return level4PoolIdx
+	default:
+		return levelMaxPoolIdx
+	}
+}
+
+const (
+	level0PoolMaxSize = 16                     //  16 B
+	level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
+	level2PoolMaxSize = level1PoolMaxSize * 16 //   4 KB
+	level3PoolMaxSize = level2PoolMaxSize * 16 //  64 KB
+	level4PoolMaxSize = level3PoolMaxSize * 16 //   1 MB
+)
+
+const (
+	level0PoolIdx = iota
+	level1PoolIdx
+	level2PoolIdx
+	level3PoolIdx
+	level4PoolIdx
+	levelMaxPoolIdx
+	poolArraySize
+)
+
+type simpleSharedBufferChildPool interface {
+	Get(size int) []byte
+	Put(any)
+}
+
+type bufferPool struct {
+	sync.Pool
+
+	defaultSize int
+}
+
+func (p *bufferPool) Get(size int) []byte {
+	bs := p.Pool.Get().(*[]byte)
+
+	if cap(*bs) < size {
+		p.Pool.Put(bs)
+
+		return make([]byte, size)
+	}
+
+	return (*bs)[:size]
+}
+
+func newBytesPool(size int) simpleSharedBufferChildPool {
+	return &bufferPool{
+		Pool: sync.Pool{
+			New: func() any {
+				bs := make([]byte, size)
+				return &bs
+			},
+		},
+		defaultSize: size,
+	}
+}
+
+// nopBufferPool is a buffer pool just makes new buffer without pooling.
+type nopBufferPool struct {
+}
+
+func (nopBufferPool) Get(length int) []byte {
+	return make([]byte, length)
+}
+
+func (nopBufferPool) Put(*[]byte) {
+}
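The pool defined above is the type stored in the recvBufferPool fields touched elsewhere in this diff. A hedged usage sketch: RecvBufferPool (ServerOption) and WithRecvBufferPool (DialOption) are, as I understand the grpc-go 1.58 surface, the experimental entry points that feed those fields; the names may change in later releases, and the target address below is a placeholder.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Server side: RecvBufferPool is the experimental ServerOption that sets
	// o.recvBufferPool shown earlier in this diff.
	srv := grpc.NewServer(grpc.RecvBufferPool(grpc.NewSharedBufferPool()))
	defer srv.Stop()

	// Client side: the matching experimental DialOption feeds dopts.recvBufferPool,
	// which the parser consults when reading messages. "localhost:50051" is a
	// placeholder target.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}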
@@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client }
 
 func (s *Begin) isRPCStats() {}
 
+// PickerUpdated indicates that the LB policy provided a new picker while the
+// RPC was waiting for one.
+type PickerUpdated struct{}
+
+// IsClient indicates if the stats information is from client side. Only Client
+// Side interfaces with a Picker, thus always returns true.
+func (*PickerUpdated) IsClient() bool { return true }
+
+func (*PickerUpdated) isRPCStats() {}
+
 // InPayload contains the information for an incoming payload.
 type InPayload struct {
 	// Client is true if this InPayload is from client side.
 	Client bool
 	// Payload is the payload with original type.
-	Payload interface{}
+	Payload any
 	// Data is the serialized message payload.
 	Data []byte
 
@@ -134,7 +144,7 @@ type OutPayload struct {
 	// Client is true if this OutPayload is from client side.
 	Client bool
 	// Payload is the payload with original type.
-	Payload interface{}
+	Payload any
 	// Data is the serialized message payload.
 	Data []byte
 	// Length is the size of the uncompressed payload data. Does not include any
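PickerUpdated is a new client-side event delivered to stats handlers, alongside the payload events whose fields are now typed any. A sketch of a hypothetical handler that reacts to it; the package and type names are illustrative, and it would be installed on the dial side with grpc.WithStatsHandler(rpcObserver{}).

// Package observability is a hypothetical home for this sketch.
package observability

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// rpcObserver is a hypothetical stats.Handler reacting to the new event.
type rpcObserver struct{}

var _ stats.Handler = rpcObserver{}

func (rpcObserver) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (rpcObserver) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (rpcObserver) HandleConn(context.Context, stats.ConnStats)                       {}

func (rpcObserver) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch s.(type) {
	case *stats.PickerUpdated:
		log.Println("LB policy delivered a new picker while an RPC was waiting")
	case *stats.InPayload, *stats.OutPayload:
		// Payload fields are typed any (alias of interface{}) as of this release.
	}
}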
@@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status {
 }
 
 // Newf returns New(c, fmt.Sprintf(format, a...)).
-func Newf(c codes.Code, format string, a ...interface{}) *Status {
+func Newf(c codes.Code, format string, a ...any) *Status {
 	return New(c, fmt.Sprintf(format, a...))
 }
 
@@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error {
 }
 
 // Errorf returns Error(c, fmt.Sprintf(format, a...)).
-func Errorf(c codes.Code, format string, a ...interface{}) error {
+func Errorf(c codes.Code, format string, a ...any) error {
 	return Error(c, fmt.Sprintf(format, a...))
 }
 
@@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) {
 	}
 	type grpcstatus interface{ GRPCStatus() *Status }
 	if gs, ok := err.(grpcstatus); ok {
-		if gs.GRPCStatus() == nil {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
 			// Error has status nil, which maps to codes.OK. There
 			// is no sensible behavior for this, so we turn it into
 			// an error with codes.Unknown and discard the existing
 			// status.
 			return New(codes.Unknown, err.Error()), false
 		}
-		return gs.GRPCStatus(), true
+		return grpcStatus, true
 	}
 	var gs grpcstatus
 	if errors.As(err, &gs) {
-		if gs.GRPCStatus() == nil {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
 			// Error wraps an error that has status nil, which maps
 			// to codes.OK. There is no sensible behavior for this,
 			// so we turn it into an error with codes.Unknown and
 			// discard the existing status.
 			return New(codes.Unknown, err.Error()), false
 		}
-		p := gs.GRPCStatus().Proto()
+		p := grpcStatus.Proto()
 		p.Message = err.Error()
 		return status.FromProto(p), true
 	}
@@ -31,6 +31,7 @@ import (
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancerload"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
@@ -54,7 +55,7 @@ import (
 // status package, or be one of the context errors. Otherwise, gRPC will use
 // codes.Unknown as the status code and err.Error() as the status message of the
 // RPC.
-type StreamHandler func(srv interface{}, stream ServerStream) error
+type StreamHandler func(srv any, stream ServerStream) error
 
 // StreamDesc represents a streaming RPC service's method specification. Used
 // on the server when registering services and on the client when initiating
@@ -79,9 +80,9 @@ type Stream interface {
 	// Deprecated: See ClientStream and ServerStream documentation instead.
 	Context() context.Context
 	// Deprecated: See ClientStream and ServerStream documentation instead.
-	SendMsg(m interface{}) error
+	SendMsg(m any) error
 	// Deprecated: See ClientStream and ServerStream documentation instead.
-	RecvMsg(m interface{}) error
+	RecvMsg(m any) error
 }
 
 // ClientStream defines the client-side behavior of a streaming RPC.
@@ -90,7 +91,9 @@ type Stream interface {
 // status package.
 type ClientStream interface {
 	// Header returns the header metadata received from the server if there
-	// is any. It blocks if the metadata is not ready to read.
+	// is any. It blocks if the metadata is not ready to read. If the metadata
+	// is nil and the error is also nil, then the stream was terminated without
+	// headers, and the status can be discovered by calling RecvMsg.
 	Header() (metadata.MD, error)
 	// Trailer returns the trailer metadata from the server, if there is any.
 	// It must only be called after stream.CloseAndRecv has returned, or
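The expanded Header comment above changes the client-visible contract: nil metadata with a nil error now means the stream ended without headers, and the final status has to be recovered through RecvMsg. A hedged client-side sketch of handling that case; the package and helper names are illustrative, and the stream and reply values would normally come from generated client code.

// Package rpcutil is illustrative; this helper is not part of grpc-go.
package rpcutil

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// readHeader follows the documented contract above: (nil, nil) from Header
// means the stream terminated without headers, and RecvMsg reports the status.
func readHeader(stream grpc.ClientStream, reply any) (metadata.MD, error) {
	md, err := stream.Header()
	if err != nil {
		return nil, err // transport or RPC error surfaced directly
	}
	if md == nil {
		// Stream ended without headers; pick up the final status via RecvMsg.
		if recvErr := stream.RecvMsg(reply); recvErr != nil {
			return nil, fmt.Errorf("stream ended without headers: %w", recvErr)
		}
		return nil, nil
	}
	return md, nil
}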
@@ -126,7 +129,7 @@ type ClientStream interface {
 	//
 	// It is not safe to modify the message after calling SendMsg. Tracing
 	// libraries and stats handlers may use the message lazily.
-	SendMsg(m interface{}) error
+	SendMsg(m any) error
 	// RecvMsg blocks until it receives a message into m or the stream is
 	// done. It returns io.EOF when the stream completes successfully. On
 	// any other error, the stream is aborted and the error contains the RPC
@@ -135,7 +138,7 @@ type ClientStream interface {
 	// It is safe to have a goroutine calling SendMsg and another goroutine
 	// calling RecvMsg on the same stream at the same time, but it is not
 	// safe to call RecvMsg on the same stream in different goroutines.
-	RecvMsg(m interface{}) error
+	RecvMsg(m any) error
 }
 
 // NewStream creates a new Stream for the client side. This is typically
@@ -155,11 +158,6 @@ type ClientStream interface {
 // If none of the above happen, a goroutine and a context will be leaked, and grpc
 // will not call the optionally-configured stats handler with a stats.End message.
 func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
-	if err := cc.idlenessMgr.onCallBegin(); err != nil {
-		return nil, err
-	}
-	defer cc.idlenessMgr.onCallEnd()
-
 	// allow interceptor to see all applicable call options, which means those
 	// configured as defaults from dial option as well as per-call options
 	opts = combine(cc.dopts.callOptions, opts)
@@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 }
 
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	// Start tracking the RPC for idleness purposes. This is where a stream is
+	// created for both streaming and unary RPCs, and hence is a good place to
+	// track active RPC count.
+	if err := cc.idlenessMgr.OnCallBegin(); err != nil {
+		return nil, err
+	}
+	// Add a calloption, to decrement the active call count, that gets executed
+	// when the RPC completes.
+	opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
+
 	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
 		// validate md
 		if err := imetadata.Validate(md); err != nil {
@@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
 		ctx = trace.NewContext(ctx, trInfo.tr)
 	}
 
-	if cs.cc.parsedTarget.URL.Scheme == "xds" {
+	if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
 		// Add extra metadata (metadata that will be added by transport) to context
 		// so the balancer can see them.
 		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
@@ -507,7 +515,7 @@ func (a *csAttempt) newStream() error {
 		return toRPCErr(nse.Err)
 	}
 	a.s = s
-	a.p = &parser{r: s}
+	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
 	return nil
 }
 
@@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
 
 func (cs *clientStream) Header() (metadata.MD, error) {
 	var m metadata.MD
-	noHeader := false
 	err := cs.withRetry(func(a *csAttempt) error {
 		var err error
 		m, err = a.s.Header()
-		if err == transport.ErrNoHeaders {
-			noHeader = true
-			return nil
-		}
 		return toRPCErr(err)
 	}, cs.commitAttemptLocked)
 
-	if err != nil {
-		cs.finish(err)
-		return nil, err
+	if m == nil && err == nil {
+		// The stream ended with success. Finish the clientStream.
+		err = io.EOF
 	}
 
-	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
+	if err != nil {
+		cs.finish(err)
+		// Do not return the error. The user should get it by calling Recv().
+		return nil, nil
+	}
+
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
 		// Only log if binary log is on and header has not been logged, and
 		// there is actually headers to log.
 		logEntry := &binarylog.ServerHeader{
@@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
 			binlog.Log(cs.ctx, logEntry)
 		}
 	}
+
 	return m, nil
 }
 
@@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error
 	cs.buffer = append(cs.buffer, op)
 }
 
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
+func (cs *clientStream) SendMsg(m any) (err error) {
 	defer func() {
 		if err != nil && err != io.EOF {
 			// Call finish on the client stream for errors generated by this SendMsg
@@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
 	return err
 }
 
-func (cs *clientStream) RecvMsg(m interface{}) error {
+func (cs *clientStream) RecvMsg(m any) error {
 	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
 		// Call Header() to binary log header if it's not already logged.
 		cs.Header()
@@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
 	if err != nil || !cs.desc.ServerStreams {
 		// err != nil or non-server-streaming indicates end of stream.
 		cs.finish(err)
-
-		if len(cs.binlogs) != 0 {
-			// finish will not log Trailer. Log Trailer here.
-			logEntry := &binarylog.ServerTrailer{
-				OnClientSide: true,
-				Trailer:      cs.Trailer(),
-				Err:          err,
-			}
-			if logEntry.Err == io.EOF {
-				logEntry.Err = nil
-			}
-			if peer, ok := peer.FromContext(cs.Context()); ok {
-				logEntry.PeerAddr = peer.Addr
-			}
-			for _, binlog := range cs.binlogs {
-				binlog.Log(cs.ctx, logEntry)
-			}
-		}
 	}
 	return err
 }
@@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) {
 			}
 		}
 	}
 
 	cs.mu.Unlock()
-	// For binary logging. only log cancel in finish (could be caused by RPC ctx
-	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
-	//
-	// Only one of cancel or trailer needs to be logged. In the cases where
-	// users don't call RecvMsg, users must have already canceled the RPC.
-	if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled {
-		c := &binarylog.Cancel{
-			OnClientSide: true,
-		}
-		for _, binlog := range cs.binlogs {
-			binlog.Log(cs.ctx, c)
+	// Only one of cancel or trailer needs to be logged.
+	if len(cs.binlogs) != 0 {
+		switch err {
+		case errContextCanceled, errContextDeadline, ErrClientConnClosing:
+			c := &binarylog.Cancel{
+				OnClientSide: true,
+			}
+			for _, binlog := range cs.binlogs {
+				binlog.Log(cs.ctx, c)
+			}
+		default:
+			logEntry := &binarylog.ServerTrailer{
+				OnClientSide: true,
+				Trailer:      cs.Trailer(),
+				Err:          err,
+			}
+			if peer, ok := peer.FromContext(cs.Context()); ok {
+				logEntry.PeerAddr = peer.Addr
+			}
+			for _, binlog := range cs.binlogs {
+				binlog.Log(cs.ctx, logEntry)
+			}
 		}
 	}
 	if err == nil {
@@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) {
 		cs.cancel()
 	}
 
-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
 	cs := a.cs
 	if a.trInfo != nil {
 		a.mu.Lock()
@@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
 	return nil
 }
 
-func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	cs := a.cs
 	if len(a.statsHandlers) != 0 && payInfo == nil {
 		payInfo = &payloadInfo{}
@@ -1270,7 +1274,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 		return nil, err
 	}
 	as.s = s
-	as.p = &parser{r: s}
+	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
 		// Listen on stream context to cleanup when the stream context is
@@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context {
 	return as.s.Context()
 }
 
-func (as *addrConnStream) SendMsg(m interface{}) (err error) {
+func (as *addrConnStream) SendMsg(m any) (err error) {
 	defer func() {
 		if err != nil && err != io.EOF {
 			// Call finish on the client stream for errors generated by this SendMsg
@@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) {
 	return nil
 }
 
-func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+func (as *addrConnStream) RecvMsg(m any) (err error) {
 	defer func() {
 		if err != nil || !as.desc.ServerStreams {
 			// err != nil or non-server-streaming indicates end of stream.
@@ -1512,7 +1516,7 @@ type ServerStream interface {
 	//
 	// It is not safe to modify the message after calling SendMsg. Tracing
 	// libraries and stats handlers may use the message lazily.
-	SendMsg(m interface{}) error
+	SendMsg(m any) error
 	// RecvMsg blocks until it receives a message into m or the stream is
 	// done. It returns io.EOF when the client has performed a CloseSend. On
 	// any non-EOF error, the stream is aborted and the error contains the
@@ -1521,7 +1525,7 @@ type ServerStream interface {
 	// It is safe to have a goroutine calling SendMsg and another goroutine
 	// calling RecvMsg on the same stream at the same time, but it is not
 	// safe to call RecvMsg on the same stream in different goroutines.
-	RecvMsg(m interface{}) error
+	RecvMsg(m any) error
 }
 
 // serverStream implements a server side Stream.
@@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
 	ss.s.SetTrailer(md)
 }
 
-func (ss *serverStream) SendMsg(m interface{}) (err error) {
+func (ss *serverStream) SendMsg(m any) (err error) {
 	defer func() {
 		if ss.trInfo != nil {
 			ss.mu.Lock()
@@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 			if err == nil {
 				ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
 			} else {
-				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				ss.trInfo.tr.SetError()
 			}
 		}
@@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 	return nil
 }
 
-func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+func (ss *serverStream) RecvMsg(m any) (err error) {
 	defer func() {
 		if ss.trInfo != nil {
 			ss.mu.Lock()
@@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 			if err == nil {
 				ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
 			} else if err != io.EOF {
-				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				ss.trInfo.tr.SetError()
 			}
 		}
@@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
 // prepareMsg returns the hdr, payload and data
 // using the compressors passed or using the
 // passed preparedmsg
-func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
 	if preparedMsg, ok := m.(*PreparedMsg); ok {
 		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
 	}
@@ -97,8 +97,8 @@ func truncate(x string, l int) string {
 
 // payload represents an RPC request or response payload.
 type payload struct {
 	sent bool // whether this is an outgoing payload
-	msg  interface{} // e.g. a proto.Message
+	msg  any // e.g. a proto.Message
 	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
 }
 
@@ -111,7 +111,7 @@ func (p payload) String() string {
 
 type fmtStringer struct {
 	format string
-	a      []interface{}
+	a      []any
 }
 
 func (f *fmtStringer) String() string {
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.56.3"
+const Version = "1.58.3"
@@ -84,6 +84,9 @@ not git grep -l 'x/net/context' -- "*.go"
 # thread safety.
 git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
 
+# - Do not use "interface{}"; use "any" instead.
+git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'
+
 # - Do not call grpclog directly. Use grpclog.Component instead.
 git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
 
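The new vet check enforces the any spelling across the codebase. Since Go 1.18, any is a type alias for interface{}, so the sweep in this release is purely mechanical; the following compiles with either spelling and both functions have identical types.

package main

import "fmt"

// "any" and "interface{}" name the same type, so values flow freely between them.
func describe(v any) string            { return fmt.Sprintf("%T", v) }
func describeOld(v interface{}) string { return describe(v) }

func main() {
	fmt.Println(describe(42), describeOld("hello"))
}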
@@ -106,7 +109,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
   goimports -l . 2>&1 | not grep -vE "\.pb\.go"
   golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
 
-  go mod tidy -compat=1.17
+  go mod tidy -compat=1.19
   git status --porcelain 2>&1 | fail_on_output || \
     (git status; git --no-pager diff; exit 1)
   popd
@@ -168,8 +171,6 @@ proto.RegisteredExtension is deprecated
 proto.RegisteredExtensions is deprecated
 proto.RegisterMapType is deprecated
 proto.Unmarshaler is deprecated
-resolver.Backend
-resolver.GRPCLB
 Target is deprecated: Use the Target field in the BuildOptions instead.
 xxx_messageInfo_
 ' "${SC_OUT}"
@@ -24,7 +24,7 @@ github.com/cespare/xxhash/v2
 # github.com/container-orchestrated-devices/container-device-interface v0.6.1
 ## explicit; go 1.17
 github.com/container-orchestrated-devices/container-device-interface/pkg/parser
-# github.com/containerd/containerd v1.7.7
+# github.com/containerd/containerd v1.7.8
 ## explicit; go 1.19
 github.com/containerd/containerd/errdefs
 github.com/containerd/containerd/log
@@ -56,7 +56,7 @@ github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
 github.com/docker/distribution/uuid
-# github.com/docker/docker v24.0.0-rc.2.0.20231025221548-fc4d035e7a4e+incompatible
+# github.com/docker/docker v24.0.0-rc.2.0.20231103125139-ed1a61dcb789+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -315,7 +315,7 @@ go.opentelemetry.io/otel/trace
 ## explicit; go 1.17
 golang.org/x/crypto/ed25519
 golang.org/x/crypto/pbkdf2
-# golang.org/x/mod v0.10.0
+# golang.org/x/mod v0.11.0
 ## explicit; go 1.17
 golang.org/x/mod/semver
 # golang.org/x/net v0.17.0
@@ -350,17 +350,17 @@ golang.org/x/text/width
 # golang.org/x/time v0.3.0
 ## explicit
 golang.org/x/time/rate
-# golang.org/x/tools v0.8.0
+# golang.org/x/tools v0.10.0
 ## explicit; go 1.18
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/gcexportdata
 golang.org/x/tools/go/internal/packagesdriver
 golang.org/x/tools/go/packages
-golang.org/x/tools/go/types/objectpath
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
 golang.org/x/tools/internal/event/keys
 golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/event/tag
 golang.org/x/tools/internal/gcimporter
 golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/packagesinternal
@@ -368,11 +368,11 @@ golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/tokeninternal
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
-# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.56.3
-## explicit; go 1.17
+# google.golang.org/grpc v1.58.3
+## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -402,6 +402,7 @@ google.golang.org/grpc/internal/grpclog
 google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
 google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/idle
 google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver