mirror of https://github.com/docker/cli.git
Merge pull request #3474 from thaJeztah/bump_deps_for_buildkit_containerd
This commit is contained in:
commit 11c76f1ddb

vendor.mod (14 changes)
@@ -8,22 +8,21 @@ go 1.16
 
 require (
 	github.com/Microsoft/go-winio v0.5.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/containerd/containerd v1.5.10
-	github.com/coreos/etcd v3.3.27+incompatible // indirect
 	github.com/creack/pty v1.1.11
 	github.com/docker/distribution v2.8.1+incompatible
 	github.com/docker/docker v20.10.7+incompatible // see "replace" for the actual version
 	github.com/docker/docker-credential-helpers v0.6.4
 	github.com/docker/go-connections v0.4.0
 	github.com/docker/go-units v0.4.0
-	github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6
+	github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
 	github.com/fvbommel/sortorder v1.0.2
 	github.com/gogo/protobuf v1.3.2
-	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-cmp v0.5.7
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/imdario/mergo v0.3.12
-	github.com/klauspost/compress v1.14.3 // indirect
+	github.com/klauspost/compress v1.15.0 // indirect
 	github.com/mitchellh/mapstructure v1.3.2
 	github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a // master (v0.9.0-dev)
 	github.com/moby/sys/signal v0.7.0
@@ -32,6 +31,7 @@ require (
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.2
+	github.com/opencontainers/runc v1.1.0 // indirect
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spf13/cobra v1.1.3
@@ -40,9 +40,13 @@ require (
 	github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0
+	go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
+	golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
+	golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect
 	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
-	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
+	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
 	golang.org/x/text v0.3.7
+	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
 	google.golang.org/grpc v1.38.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0
 	gotest.tools/v3 v3.1.0
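The notable bumps above are docker/swarmkit (to a 2022-03-07 snapshot) and klauspost/compress v1.15.0; cespare/xxhash/v2, opencontainers/runc v1.1.0, and go.etcd.io/etcd/raft/v3 become pinned indirect dependencies, while the old coreos/etcd and golang/protobuf pins drop out. As a hedged aside (not part of the commit), one way to confirm which versions actually end up compiled into a binary is Go's runtime build info; the module paths below are just examples taken from the require block:

// checkdeps.go: print the resolved versions of selected modules that
// were compiled into this binary. Build info is only recorded for
// binaries built in module mode (Go 1.12+).
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no module build info available")
		return
	}
	// Modules of interest, taken from the require block above.
	want := map[string]bool{
		"github.com/docker/swarmkit":    true,
		"github.com/klauspost/compress": true,
		"github.com/cespare/xxhash/v2":  true,
	}
	for _, dep := range info.Deps {
		if want[dep.Path] {
			fmt.Printf("%s %s\n", dep.Path, dep.Version)
		}
	}
}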
vendor.sum (58 changes)

@@ -97,10 +97,14 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXe
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -109,12 +113,16 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
 github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
@@ -135,6 +143,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -208,8 +217,6 @@ github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA=
-github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
@@ -230,6 +237,7 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
@@ -271,8 +279,8 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6 h1:mFQcXSzzNXVKAnl0KltjSQ7rbgipTYcXJns4sucurKA=
-github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
+github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0 h1:YehAv2BPLfTm58HW04wRnNy8Oo/CAzWji7mjJ6UJWgM=
+github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -298,6 +306,7 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt
 github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
 github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -323,6 +332,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -479,8 +489,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.14.3 h1:DQv1WP+iS4srNjibdnHtqu8JNWCDMluj5NzPnFJsnvk=
-github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
+github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -535,6 +545,7 @@ github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQ
 github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
 github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
 github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
 github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
@@ -589,8 +600,9 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
 github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
 github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -601,6 +613,7 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
 github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
 github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
 github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
@@ -641,6 +654,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -741,7 +755,11 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=
 go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/raft/v3 v3.5.2 h1:uCC37qOXqBvKqTGHGyhASsaCsnTuJugl1GvneJNwHWo=
+go.etcd.io/etcd/raft/v3 v3.5.2/go.mod h1:G6pCP1sFgbjod7/KnEHY0vHUViqxjkdt6AiKsD0GRr8=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -749,8 +767,11 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -766,8 +787,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
+golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -832,8 +854,10 @@ golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -912,28 +936,35 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1084,8 +1115,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
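Each module in vendor.sum carries up to two kinds of entries: an "h1:" line hashing the full module tree and a "/go.mod" line hashing only its go.mod file, which is why removing coreos/etcd v3.3.27 above drops both of its lines. As a hedged sketch (not part of the commit; the checkout directory below is hypothetical), an "h1:" value can be recomputed with golang.org/x/mod:

// sumhash.go: recompute an "h1:" module hash like the entries above
// (a sketch; the local checkout path is hypothetical).
package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashDir hashes a file tree with the same "h1" algorithm the go
	// command uses for go.sum / vendor.sum entries.
	h, err := dirhash.HashDir(
		"/tmp/xxhash-2.1.2",                   // hypothetical module checkout
		"github.com/cespare/xxhash/v2@v2.1.2", // module@version prefix
		dirhash.Hash1,                         // the "h1" hash function
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(h) // compare against the h1: entry in vendor.sum
}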
vendor/github.com/cespare/xxhash/v2/.travis.yml (deleted; path assumed, since the README change below swaps the Travis CI badge for GitHub Actions)
@@ -1,8 +0,0 @@
-language: go
-go:
-  - "1.x"
-  - master
-env:
-  - TAGS=""
-  - TAGS="-tags purego"
-script: go test $TAGS -v ./...
vendor/github.com/cespare/xxhash/v2/README.md (path assumed)
@@ -1,7 +1,7 @@
 # xxhash
 
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
 
 xxhash is a Go implementation of the 64-bit
 [xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'
 
 - [InfluxDB](https://github.com/influxdata/influxdb)
 - [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
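For orientation, the public API that these vendored files implement is small; a minimal usage sketch (not part of the diff):

// xxhash usage sketch: one-shot hashing of bytes and strings, plus
// streaming via Digest.
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	fmt.Println(xxhash.Sum64([]byte("hello"))) // one-shot over a byte slice
	fmt.Println(xxhash.Sum64String("hello"))   // same value, avoids the copy

	d := xxhash.New() // streaming digest
	d.WriteString("hel")
	d.WriteString("lo")
	fmt.Println(d.Sum64()) // equals Sum64([]byte("hello"))
}

Sum64String and Digest.WriteString are exactly the helpers whose unsafe implementation is rewritten further down.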
vendor/github.com/cespare/xxhash/v2/xxhash.go (path assumed)
@@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
 	b, d.v4 = consumeUint64(b)
 	b, d.total = consumeUint64(b)
 	copy(d.mem[:], b)
-	b = b[len(d.mem):]
 	d.n = int(d.total % uint64(len(d.mem)))
 	return nil
 }
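The removed statement re-sliced b after the final read; since b is not used again before d.n is recomputed from d.total, the advance was dead code. A round-trip sketch of the state (de)serialization this method implements (not part of the diff):

// Snapshot and restore a Digest's intermediate state via the
// encoding.BinaryMarshaler/BinaryUnmarshaler methods it implements.
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("hello ")

	state, err := d.MarshalBinary() // snapshot the partial hash state
	if err != nil {
		panic(err)
	}

	d2 := xxhash.New()
	if err := d2.UnmarshalBinary(state); err != nil { // restore it
		panic(err)
	}

	d.WriteString("world")
	d2.WriteString("world")
	fmt.Println(d.Sum64() == d2.Sum64()) // true
}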
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s (path assumed)
@@ -6,7 +6,7 @@
 
 // Register allocation:
 // AX	h
-// CX	pointer to advance through b
+// SI	pointer to advance through b
 // DX	n
 // BX	loop end
 // R8	v1, k1
@@ -16,39 +16,39 @@
 // R12	tmp
 // R13	prime1v
 // R14	prime2v
-// R15	prime4v
+// DI	prime4v
 
-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
 // It assumes that R13 has prime1v and R14 has prime2v.
 #define round(r) \
-	MOVQ  (CX), R12 \
-	ADDQ  $8, CX    \
+	MOVQ  (SI), R12 \
+	ADDQ  $8, SI    \
 	IMULQ R14, R12  \
 	ADDQ  R12, r    \
 	ROLQ  $31, r    \
 	IMULQ R13, r
 
 // mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
 #define mergeRound(acc, val) \
 	IMULQ R14, val \
 	ROLQ  $31, val \
 	IMULQ R13, val \
 	XORQ  val, acc \
 	IMULQ R13, acc \
-	ADDQ  R15, acc
+	ADDQ  DI, acc
 
 // func Sum64(b []byte) uint64
 TEXT ·Sum64(SB), NOSPLIT, $0-32
 	// Load fixed primes.
 	MOVQ ·prime1v(SB), R13
 	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), R15
+	MOVQ ·prime4v(SB), DI
 
 	// Load slice.
-	MOVQ b_base+0(FP), CX
+	MOVQ b_base+0(FP), SI
 	MOVQ b_len+8(FP), DX
-	LEAQ (CX)(DX*1), BX
+	LEAQ (SI)(DX*1), BX
 
 	// The first loop limit will be len(b)-32.
 	SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
 	XORQ R11, R11
 	SUBQ R13, R11
 
-	// Loop until CX > BX.
+	// Loop until SI > BX.
 blockLoop:
 	round(R8)
 	round(R9)
 	round(R10)
 	round(R11)
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE  blockLoop
 
 	MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
 afterBlocks:
 	ADDQ DX, AX
 
-	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
 	ADDQ $24, BX
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JG   fourByte
 
 wordLoop:
 	// Calculate k1.
-	MOVQ  (CX), R8
-	ADDQ  $8, CX
+	MOVQ  (SI), R8
+	ADDQ  $8, SI
 	IMULQ R14, R8
 	ROLQ  $31, R8
 	IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
 	XORQ  R8, AX
 	ROLQ  $27, AX
 	IMULQ R13, AX
-	ADDQ  R15, AX
+	ADDQ  DI, AX
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE  wordLoop
 
 fourByte:
 	ADDQ $4, BX
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JG   singles
 
-	MOVL  (CX), R8
-	ADDQ  $4, CX
+	MOVL  (SI), R8
+	ADDQ  $4, SI
 	IMULQ R13, R8
 	XORQ  R8, AX
 
@@ -138,19 +138,19 @@ fourByte:
 
 singles:
 	ADDQ $4, BX
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JGE  finalize
 
singlesLoop:
-	MOVBQZX (CX), R12
-	ADDQ    $1, CX
+	MOVBQZX (SI), R12
+	ADDQ    $1, SI
 	IMULQ   ·prime5v(SB), R12
 	XORQ    R12, AX
 
 	ROLQ  $11, AX
 	IMULQ R13, AX
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JL   singlesLoop
 
 finalize:
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
 	MOVQ ·prime2v(SB), R14
 
 	// Load slice.
-	MOVQ b_base+8(FP), CX
+	MOVQ b_base+8(FP), SI
 	MOVQ b_len+16(FP), DX
-	LEAQ (CX)(DX*1), BX
+	LEAQ (SI)(DX*1), BX
 	SUBQ $32, BX
 
 	// Load vN from d.
@@ -199,7 +199,7 @@ blockLoop:
 	round(R10)
 	round(R11)
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE  blockLoop
 
 	// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
 	MOVQ R10, 16(AX)
 	MOVQ R11, 24(AX)
 
-	// The number of bytes written is CX minus the old base pointer.
-	SUBQ b_base+8(FP), CX
-	MOVQ CX, ret+32(FP)
+	// The number of bytes written is SI minus the old base pointer.
+	SUBQ b_base+8(FP), SI
+	MOVQ SI, ret+32(FP)
 
 	RET
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go (path assumed)
@@ -6,41 +6,52 @@
 package xxhash
 
 import (
-	"reflect"
 	"unsafe"
 )
 
-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
 // In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
 //
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
+//   var b []byte
+//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+//   bh.Len = len(s)
+//   bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
 
 // Sum64String computes the 64-bit xxHash digest of s.
 // It may be faster than Sum64([]byte(s)) by avoiding a copy.
 func Sum64String(s string) uint64 {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
 	return Sum64(b)
 }
 
 // WriteString adds more data to d. It always returns len(s), nil.
 // It may be faster than Write([]byte(s)) by avoiding a copy.
 func (d *Digest) WriteString(s string) (n int, err error) {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return d.Write(b)
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
 }
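The new sliceHeader conversion exists purely to keep Sum64String and WriteString under the inliner's cost threshold, as the rewritten comments explain. One way to watch the compiler's decision is -gcflags=-m, which prints inlining diagnostics; a sketch (not part of the commit; the file name is arbitrary):

// Build with: go build -gcflags=-m inline_check.go
// and look for lines like "inlining call to xxhash.Sum64String".
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "The quick brown fox"
	// The unsafe fast path must agree with the copying path.
	fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true
}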
vendor/github.com/coreos/etcd/NOTICE (deleted; path assumed from the removed coreos/etcd dependency)
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
vendor/github.com/coreos/etcd/raft/raftpb/raft.proto (deleted; path assumed from the removed coreos/etcd dependency)
@@ -1,95 +0,0 @@
-syntax = "proto2";
-package raftpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-enum EntryType {
-	EntryNormal = 0;
-	EntryConfChange = 1;
-}
-
-message Entry {
-	optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional EntryType Type = 1 [(gogoproto.nullable) = false];
-	optional bytes Data = 4;
-}
-
-message SnapshotMetadata {
-	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
-	optional uint64 index = 2 [(gogoproto.nullable) = false];
-	optional uint64 term = 3 [(gogoproto.nullable) = false];
-}
-
-message Snapshot {
-	optional bytes data = 1;
-	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
-}
-
-enum MessageType {
-	MsgHup = 0;
-	MsgBeat = 1;
-	MsgProp = 2;
-	MsgApp = 3;
-	MsgAppResp = 4;
-	MsgVote = 5;
-	MsgVoteResp = 6;
-	MsgSnap = 7;
-	MsgHeartbeat = 8;
-	MsgHeartbeatResp = 9;
-	MsgUnreachable = 10;
-	MsgSnapStatus = 11;
-	MsgCheckQuorum = 12;
-	MsgTransferLeader = 13;
-	MsgTimeoutNow = 14;
-	MsgReadIndex = 15;
-	MsgReadIndexResp = 16;
-	MsgPreVote = 17;
-	MsgPreVoteResp = 18;
-}
-
-message Message {
-	optional MessageType type = 1 [(gogoproto.nullable) = false];
-	optional uint64 to = 2 [(gogoproto.nullable) = false];
-	optional uint64 from = 3 [(gogoproto.nullable) = false];
-	optional uint64 term = 4 [(gogoproto.nullable) = false];
-	optional uint64 logTerm = 5 [(gogoproto.nullable) = false];
-	optional uint64 index = 6 [(gogoproto.nullable) = false];
-	repeated Entry entries = 7 [(gogoproto.nullable) = false];
-	optional uint64 commit = 8 [(gogoproto.nullable) = false];
-	optional Snapshot snapshot = 9 [(gogoproto.nullable) = false];
-	optional bool reject = 10 [(gogoproto.nullable) = false];
-	optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
-	optional bytes context = 12;
-}
-
-message HardState {
-	optional uint64 term = 1 [(gogoproto.nullable) = false];
-	optional uint64 vote = 2 [(gogoproto.nullable) = false];
-	optional uint64 commit = 3 [(gogoproto.nullable) = false];
-}
-
-message ConfState {
-	repeated uint64 nodes = 1;
-	repeated uint64 learners = 2;
-}
-
-enum ConfChangeType {
-	ConfChangeAddNode = 0;
-	ConfChangeRemoveNode = 1;
-	ConfChangeUpdateNode = 2;
-	ConfChangeAddLearnerNode = 3;
-}
-
-message ConfChange {
-	optional uint64 ID = 1 [(gogoproto.nullable) = false];
-	optional ConfChangeType Type = 2 [(gogoproto.nullable) = false];
-	optional uint64 NodeID = 3 [(gogoproto.nullable) = false];
-	optional bytes Context = 4;
-}
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -246,6 +246,34 @@ service Control {
     rpc RemoveResource(RemoveResourceRequest) returns (RemoveResourceResponse) {
         option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
     }
 
+    // --- volumes APIs ---
+
+    // CreateVolume returns a `CreateVolumeResponse` with a `Volume` based on the
+    // provided `CreateVolumeRequest.VolumeSpec`.
+    // - Returns `InvalidArgument` if the `CreateVolumeRequest.VolumeSpec` is
+    //   malformed.
+    rpc CreateVolume(CreateVolumeRequest) returns (CreateVolumeResponse) {
+        option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+    }
+
+    // GetVolume returns a `GetVolumeResponse` with a Volume with the same ID
+    // as `GetVolumeRequest.ID`
+    rpc GetVolume(GetVolumeRequest) returns (GetVolumeResponse) {
+        option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+    }
+
+    rpc UpdateVolume(UpdateVolumeRequest) returns (UpdateVolumeResponse) {
+        option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+    }
+
+    rpc ListVolumes(ListVolumesRequest) returns (ListVolumesResponse) {
+        option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+    }
+
+    rpc RemoveVolume(RemoveVolumeRequest) returns (RemoveVolumeResponse) {
+        option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+    }
 }
 
 message GetNodeRequest {
@@ -787,3 +815,57 @@ message ListResourcesRequest {
 message ListResourcesResponse {
     repeated Resource resources = 1;
 }
+
+message CreateVolumeRequest {
+    VolumeSpec spec = 1;
+}
+
+message CreateVolumeResponse {
+    Volume volume = 1;
+}
+
+message GetVolumeRequest {
+    string volume_id = 1;
+}
+
+message GetVolumeResponse {
+    Volume volume = 1;
+}
+
+message UpdateVolumeRequest {
+    string volume_id = 1;
+
+    Version volume_version = 2;
+
+    VolumeSpec spec = 3;
+}
+
+message UpdateVolumeResponse {
+    Volume volume = 1;
+}
+
+message ListVolumesRequest {
+    message Filters {
+        repeated string names = 1;
+        repeated string id_prefixes = 2;
+        map<string, string> labels = 3;
+        repeated string name_prefixes = 4;
+        repeated string groups = 5;
+        repeated string drivers = 6;
+    }
+
+    Filters filters = 1;
+}
+
+message ListVolumesResponse {
+    repeated Volume volumes = 1;
+}
+
+message RemoveVolumeRequest {
+    string volume_id = 1;
+    // Force forces the volume to be deleted from swarmkit, regardless of
+    // whether its current state would permit such an action.
+    bool force = 2;
+}
+
+message RemoveVolumeResponse {}
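As a usage sketch (not part of this diff): once these RPCs are generated into github.com/docker/swarmkit/api, a manager-side client could create a volume roughly as follows. The connection setup is elided, and the volume name and driver name are placeholder values; the Driver must name a real CSI plugin, since the spec defines no default.

package main

import (
	"context"

	"github.com/docker/swarmkit/api"
	"google.golang.org/grpc"
)

// createVolume sends a CreateVolumeRequest over an existing manager
// connection and returns the Volume object stored by swarmkit.
func createVolume(ctx context.Context, conn *grpc.ClientConn) (*api.Volume, error) {
	client := api.NewControlClient(conn)
	resp, err := client.CreateVolume(ctx, &api.CreateVolumeRequest{
		Spec: &api.VolumeSpec{
			// Placeholder name and plugin; see VolumeSpec in types.proto below.
			Annotations: api.Annotations{Name: "example-volume"},
			Driver:      &api.Driver{Name: "example.csi.driver"},
		},
	})
	if err != nil {
		return nil, err
	}
	return resp.Volume, nil
}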
File diff suppressed because it is too large
@@ -42,6 +42,13 @@ service Dispatcher { // maybe dispatch, al likes this
         option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
     };
 
+    // UpdateVolumeStatus updates the status of a Volume. Like
+    // UpdateTaskStatus, the node should send such updates on every status
+    // change of its volumes.
+    rpc UpdateVolumeStatus(UpdateVolumeStatusRequest) returns (UpdateVolumeStatusResponse) {
+        option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+    };
+
     // Tasks is a stream of tasks state for node. Each message contains full list
     // of tasks which should be run on node, if task is not present in that list,
     // it should be terminated.
@@ -155,10 +162,35 @@ message UpdateTaskStatusRequest {
     repeated TaskStatusUpdate updates = 3;
 }
 
 message UpdateTaskStatusResponse{
     // void
 }
 
+message UpdateVolumeStatusRequest {
+    string session_id = 1;
+
+    message VolumeStatusUpdate {
+        // ID is the ID of the volume being updated. This is the Swarmkit ID,
+        // not the CSI VolumeID.
+        string id = 1;
+        // Unpublished is set to true when the volume is affirmatively
+        // unpublished on the Node side. We don't need to report that a Volume
+        // is published on the node; as soon as the Volume is assigned to
+        // the Node, we must assume that it has been published until informed
+        // otherwise.
+        //
+        // Further, the Node must not send unpublished = true unless it will
+        // definitely no longer attempt to call NodePublishVolume.
+        bool unpublished = 2;
+    }
+
+    repeated VolumeStatusUpdate updates = 2;
+}
+
+message UpdateVolumeStatusResponse {
+    // empty on purpose
+}
+
 message TasksRequest {
     string session_id = 1;
 }
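A sketch of how a worker might report an unpublished volume with the new message, assuming the gogo/protobuf naming conventions used elsewhere in this tree (SessionID, ID, and the nested UpdateVolumeStatusRequest_VolumeStatusUpdate type):

package main

import "github.com/docker/swarmkit/api"

// buildUnpublishedUpdate reports that a volume was affirmatively unpublished
// on this node. Per the comments above, it must only be sent once the node
// will definitely no longer call NodePublishVolume for the volume.
func buildUnpublishedUpdate(sessionID, swarmVolumeID string) *api.UpdateVolumeStatusRequest {
	return &api.UpdateVolumeStatusRequest{
		SessionID: sessionID,
		Updates: []*api.UpdateVolumeStatusRequest_VolumeStatusUpdate{
			{
				ID:          swarmVolumeID, // the Swarmkit ID, not the CSI VolumeID
				Unpublished: true,
			},
		},
	}
}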
@@ -178,6 +210,7 @@ message Assignment {
         Task task = 1;
         Secret secret = 2;
         Config config = 3;
+        VolumeAssignment volume = 4;
     }
 }
File diff suppressed because it is too large
@@ -269,6 +269,10 @@ message Task {
     // JobIteration is the iteration number of the Job-mode Service that this
     // task belongs to.
     Version job_iteration = 16;
+
+    // Volumes is a list of VolumeAttachments for this task. It specifies which
+    // volumes this task is allocated.
+    repeated VolumeAttachment volumes = 17;
 }
 
 // NetworkAttachment specifies the network parameters of attachment to
@@ -510,3 +514,43 @@ message Extension {
     // // Indices, with values expressed as Go templates.
     //repeated IndexEntry index_templates = 6;
 }
+
+// Volume is the top-level object describing a volume usable by Swarmkit. The
+// Volume contains the user's VolumeSpec, the Volume's status, and the Volume
+// object that was returned by the CSI Plugin when the volume was created.
+message Volume {
+    option (docker.protobuf.plugin.store_object) = {
+        watch_selectors: {
+            id: true
+            id_prefix: true
+            name: true
+            name_prefix: true
+            custom: true
+            custom_prefix: true
+        }
+    };
+
+    // ID is the swarmkit-internal ID for this volume object. This has no
+    // relation to the CSI volume identifier provided by the CSI Plugin.
+    string id = 1;
+    Meta meta = 2 [(gogoproto.nullable) = false];
+
+    // Spec is the desired state of the Volume, as provided by the user.
+    VolumeSpec spec = 3 [(gogoproto.nullable) = false];
+
+    // PublishStatus is the status of the volume as it pertains to the various
+    // nodes it is in use on.
+    repeated VolumePublishStatus publish_status = 4;
+
+    // VolumeInfo contains information about the volume originating from the
+    // CSI plugin when the volume is created.
+    VolumeInfo volume_info = 5;
+
+    // PendingDelete indicates that this Volume is being removed from Swarm.
+    // Before a Volume can be removed, we must call the DeleteVolume on the
+    // Controller. Because of this, we cannot immediately remove the Volume
+    // when a user wishes to delete it. Instead, we will mark a Volume with
+    // PendingDelete = true, which instructs Swarm to go through the work of
+    // removing the volume and then delete it when finished.
+    bool pending_delete = 6;
+}
@@ -6,10 +6,10 @@ package api
 import (
     context "context"
     fmt "fmt"
-    raftpb "github.com/coreos/etcd/raft/raftpb"
     github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
     raftselector "github.com/docker/swarmkit/manager/raftselector"
     proto "github.com/gogo/protobuf/proto"
+    raftpb "go.etcd.io/etcd/raft/v3/raftpb"
     grpc "google.golang.org/grpc"
     codes "google.golang.org/grpc/codes"
     metadata "google.golang.org/grpc/metadata"
@@ -532,6 +532,7 @@ type StoreAction struct {
     //  *StoreAction_Resource
     //  *StoreAction_Extension
     //  *StoreAction_Config
+    //  *StoreAction_Volume
     Target isStoreAction_Target `protobuf_oneof:"target"`
 }
@@ -600,6 +601,9 @@ type StoreAction_Extension struct {
 type StoreAction_Config struct {
     Config *Config `protobuf:"bytes,10,opt,name=config,proto3,oneof" json:"config,omitempty"`
 }
+type StoreAction_Volume struct {
+    Volume *Volume `protobuf:"bytes,11,opt,name=volume,proto3,oneof" json:"volume,omitempty"`
+}
 
 func (*StoreAction_Node) isStoreAction_Target()    {}
 func (*StoreAction_Service) isStoreAction_Target() {}
@@ -610,6 +614,7 @@ func (*StoreAction_Secret) isStoreAction_Target() {}
 func (*StoreAction_Resource) isStoreAction_Target()  {}
 func (*StoreAction_Extension) isStoreAction_Target() {}
 func (*StoreAction_Config) isStoreAction_Target()    {}
+func (*StoreAction_Volume) isStoreAction_Target()    {}
 
 func (m *StoreAction) GetTarget() isStoreAction_Target {
     if m != nil {
@@ -681,6 +686,13 @@ func (m *StoreAction) GetConfig() *Config {
     return nil
 }
 
+func (m *StoreAction) GetVolume() *Volume {
+    if x, ok := m.GetTarget().(*StoreAction_Volume); ok {
+        return x.Volume
+    }
+    return nil
+}
+
 // XXX_OneofWrappers is for the internal use of the proto package.
 func (*StoreAction) XXX_OneofWrappers() []interface{} {
     return []interface{}{
@@ -693,6 +705,7 @@ func (*StoreAction) XXX_OneofWrappers() []interface{} {
         (*StoreAction_Resource)(nil),
         (*StoreAction_Extension)(nil),
         (*StoreAction_Config)(nil),
+        (*StoreAction_Volume)(nil),
     }
 }
@@ -718,72 +731,73 @@ func init() {
 }
 
 var fileDescriptor_d2c32e1e3c930c15 = []byte{
-    // 1028 bytes of a gzipped FileDescriptorProto
+    // 1046 bytes of a gzipped FileDescriptorProto
     0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0x41, 0x73, 0xdb, 0x44,
-    0x14, 0xc7, 0x25, 0x5b, 0x75, 0x9a, 0x97, 0x36, 0x09, 0x5b, 0x12, 0x14, 0xb5, 0x28, 0xae, 0xda,
-    0x19, 0x9c, 0x90, 0xc8, 0x83, 0x61, 0xa6, 0x4c, 0xa1, 0x87, 0x38, 0xf1, 0x4c, 0x4c, 0x5a, 0xa7,
-    0xa3, 0x24, 0xd0, 0x5b, 0x90, 0xa5, 0x8d, 0x2b, 0x1c, 0x6b, 0xcd, 0xee, 0x3a, 0x81, 0x0b, 0xd3,
-    0x23, 0xe4, 0xc4, 0x0d, 0x86, 0x99, 0x0e, 0x07, 0x38, 0xf7, 0x03, 0xf0, 0x01, 0x98, 0x0c, 0xa7,
-    0xde, 0xe8, 0x29, 0x43, 0x9d, 0x3b, 0x7c, 0x05, 0x66, 0x57, 0x52, 0x12, 0x6c, 0xd9, 0xf1, 0x81,
-    0x4b, 0xb2, 0xa3, 0xfd, 0xfd, 0xdf, 0xff, 0xed, 0xee, 0xdb, 0xb7, 0x86, 0x85, 0x46, 0xc0, 0x9f,
-    0x76, 0xea, 0xb6, 0x47, 0x5a, 0x45, 0x9f, 0x78, 0x4d, 0x4c, 0x8b, 0xec, 0xd0, 0xa5, 0xad, 0x66,
-    0xc0, 0x8b, 0x6e, 0x3b, 0x28, 0x52, 0x77, 0x8f, 0xdb, 0x6d, 0x4a, 0x38, 0x41, 0x28, 0x9a, 0xb7,
-    0x93, 0x79, 0xfb, 0xe0, 0x3d, 0x63, 0xe9, 0x12, 0x39, 0xa9, 0x7f, 0x81, 0x3d, 0xce, 0xa2, 0x08,
-    0xc6, 0xe2, 0x25, 0x34, 0xff, 0xba, 0x8d, 0x13, 0x76, 0xf9, 0x02, 0xeb, 0x11, 0x8a, 0x09, 0x2b,
-    0x62, 0xee, 0xf9, 0x32, 0x21, 0xf9, 0xa7, 0x5d, 0xbf, 0x90, 0x9c, 0xf1, 0x66, 0x83, 0x34, 0x88,
-    0x1c, 0x16, 0xc5, 0x28, 0xfe, 0x7a, 0x6f, 0x88, 0xa1, 0x24, 0xea, 0x9d, 0xbd, 0x62, 0x7b, 0xbf,
-    0xd3, 0x08, 0xc2, 0xf8, 0x5f, 0x24, 0xb4, 0x5e, 0xa8, 0x00, 0x8e, 0xbb, 0xc7, 0x1f, 0xe1, 0x56,
-    0x1d, 0x53, 0x74, 0x07, 0xc6, 0x84, 0xd7, 0x6e, 0xe0, 0xeb, 0x6a, 0x5e, 0x2d, 0x68, 0x65, 0xe8,
-    0x9e, 0xcc, 0xe7, 0x04, 0x50, 0x5d, 0x73, 0x72, 0x62, 0xaa, 0xea, 0x0b, 0x28, 0x24, 0x3e, 0x16,
-    0x50, 0x26, 0xaf, 0x16, 0xc6, 0x23, 0xa8, 0x46, 0x7c, 0x2c, 0x20, 0x31, 0x55, 0xf5, 0x11, 0x02,
-    0xcd, 0xf5, 0x7d, 0xaa, 0x67, 0x05, 0xe1, 0xc8, 0x31, 0x2a, 0x43, 0x8e, 0x71, 0x97, 0x77, 0x98,
-    0xae, 0xe5, 0xd5, 0xc2, 0x44, 0xe9, 0xae, 0xdd, 0xbf, 0xd3, 0xf6, 0x79, 0x36, 0x5b, 0x92, 0x2d,
-    0x6b, 0xc7, 0x27, 0xf3, 0x8a, 0x13, 0x2b, 0xad, 0xdb, 0x30, 0xf1, 0x09, 0x09, 0x42, 0x07, 0x7f,
-    0xd9, 0xc1, 0x8c, 0x9f, 0xd9, 0xa8, 0xe7, 0x36, 0xd6, 0x4f, 0x2a, 0x5c, 0x8b, 0x18, 0xd6, 0x26,
-    0x21, 0xc3, 0xa3, 0xad, 0xea, 0x43, 0x18, 0x6b, 0x49, 0x5b, 0xa6, 0x67, 0xf2, 0xd9, 0xc2, 0x44,
-    0xc9, 0x1c, 0x9e, 0x9d, 0x93, 0xe0, 0xe8, 0x5d, 0x98, 0xa2, 0xb8, 0x45, 0x0e, 0xb0, 0xbf, 0x9b,
-    0x44, 0xc8, 0xe6, 0xb3, 0x05, 0xad, 0x9c, 0x99, 0x56, 0x9c, 0xc9, 0x78, 0x2a, 0x12, 0x31, 0xab,
-    0x0c, 0xd7, 0x1e, 0x62, 0xf7, 0x00, 0x27, 0x0b, 0x28, 0x81, 0x26, 0x76, 0x4c, 0x26, 0x76, 0xb9,
-    0xa7, 0x64, 0xad, 0x29, 0xb8, 0x1e, 0xc7, 0x88, 0x16, 0x68, 0x3d, 0x84, 0xb9, 0xc7, 0x94, 0x78,
-    0x98, 0xb1, 0x88, 0x65, 0xcc, 0x6d, 0x9c, 0x39, 0x2c, 0x88, 0x85, 0xc9, 0x2f, 0xb1, 0xc9, 0x94,
-    0x1d, 0x95, 0x95, 0x9d, 0x80, 0xc9, 0xfc, 0x7d, 0xed, 0xd9, 0x0f, 0x96, 0x62, 0xdd, 0x02, 0x23,
-    0x2d, 0x5a, 0xec, 0xb5, 0x01, 0xfa, 0x16, 0xa7, 0xd8, 0x6d, 0xfd, 0x1f, 0x56, 0x37, 0x61, 0x2e,
-    0x25, 0x58, 0xec, 0xf4, 0x31, 0xcc, 0x38, 0x98, 0x91, 0xfd, 0x03, 0xbc, 0xe2, 0xfb, 0x54, 0xa4,
-    0x13, 0xdb, 0x8c, 0x72, 0x9e, 0xd6, 0x12, 0xcc, 0xf6, 0xaa, 0xe3, 0x72, 0x48, 0xab, 0x99, 0x7d,
-    0xb8, 0x51, 0x0d, 0x39, 0xa6, 0xa1, 0xbb, 0x2f, 0xe2, 0x24, 0x4e, 0xb3, 0x90, 0x39, 0x33, 0xc9,
-    0x75, 0x4f, 0xe6, 0x33, 0xd5, 0x35, 0x27, 0x13, 0xf8, 0xe8, 0x01, 0xe4, 0x5c, 0x8f, 0x07, 0x24,
-    0x8c, 0x6b, 0x65, 0x3e, 0xed, 0xdc, 0xb6, 0x38, 0xa1, 0x78, 0x45, 0x62, 0x49, 0x11, 0x47, 0x22,
-    0xeb, 0x77, 0x0d, 0x26, 0x2e, 0xcc, 0xa2, 0x8f, 0xce, 0xc2, 0x09, 0xab, 0xc9, 0xd2, 0x9d, 0x4b,
-    0xc2, 0x6d, 0x04, 0xa1, 0x9f, 0x04, 0x43, 0x76, 0x5c, 0x41, 0x19, 0xb9, 0xe3, 0x7a, 0x9a, 0x54,
-    0xdc, 0xcd, 0x75, 0x25, 0xaa, 0x1e, 0x74, 0x0f, 0xc6, 0x18, 0xa6, 0x07, 0x81, 0x87, 0xe5, 0xe5,
-    0x9c, 0x28, 0xdd, 0x4c, 0x75, 0x8b, 0x90, 0x75, 0xc5, 0x49, 0x68, 0x61, 0xc4, 0x5d, 0xd6, 0x8c,
-    0x2f, 0x6f, 0xaa, 0xd1, 0xb6, 0xcb, 0x9a, 0xc2, 0x48, 0x70, 0xc2, 0x28, 0xc4, 0xfc, 0x90, 0xd0,
-    0xa6, 0x7e, 0x65, 0xb0, 0x51, 0x2d, 0x42, 0x84, 0x51, 0x4c, 0x0b, 0xa1, 0xb7, 0xdf, 0x61, 0x1c,
-    0x53, 0x3d, 0x37, 0x58, 0xb8, 0x1a, 0x21, 0x42, 0x18, 0xd3, 0xe8, 0x03, 0xc8, 0x31, 0xec, 0x51,
-    0xcc, 0xf5, 0x31, 0xa9, 0x33, 0xd2, 0x57, 0x26, 0x88, 0x75, 0xd1, 0x52, 0xe4, 0x08, 0xdd, 0x87,
-    0xab, 0x14, 0x33, 0xd2, 0xa1, 0x1e, 0xd6, 0xaf, 0x4a, 0xdd, 0xad, 0xd4, 0x6b, 0x18, 0x33, 0xeb,
-    0x8a, 0x73, 0xc6, 0xa3, 0x07, 0x30, 0x8e, 0xbf, 0xe2, 0x38, 0x64, 0xe2, 0xf0, 0xc6, 0xa5, 0xf8,
-    0xed, 0x34, 0x71, 0x25, 0x81, 0xd6, 0x15, 0xe7, 0x5c, 0x21, 0x12, 0xf6, 0x48, 0xb8, 0x17, 0x34,
-    0x74, 0x18, 0x9c, 0xf0, 0xaa, 0x24, 0x44, 0xc2, 0x11, 0x5b, 0xbe, 0x0a, 0x39, 0xee, 0xd2, 0x06,
-    0xe6, 0x8b, 0xff, 0xa8, 0x30, 0xd5, 0x53, 0x17, 0xe8, 0x1d, 0x18, 0xdb, 0xa9, 0x6d, 0xd4, 0x36,
-    0x3f, 0xab, 0x4d, 0x2b, 0x86, 0x71, 0xf4, 0x3c, 0x3f, 0xdb, 0x43, 0xec, 0x84, 0xcd, 0x90, 0x1c,
-    0x86, 0xa8, 0x04, 0x37, 0xb6, 0xb6, 0x37, 0x9d, 0xca, 0xee, 0xca, 0xea, 0x76, 0x75, 0xb3, 0xb6,
-    0xbb, 0xea, 0x54, 0x56, 0xb6, 0x2b, 0xd3, 0xaa, 0x31, 0x77, 0xf4, 0x3c, 0x3f, 0xd3, 0x23, 0x5a,
-    0xa5, 0xd8, 0xe5, 0xb8, 0x4f, 0xb3, 0xf3, 0x78, 0x4d, 0x68, 0x32, 0xa9, 0x9a, 0x9d, 0xb6, 0x9f,
-    0xa6, 0x71, 0x2a, 0x8f, 0x36, 0x3f, 0xad, 0x4c, 0x67, 0x53, 0x35, 0x8e, 0x6c, 0x97, 0xc6, 0x5b,
-    0xdf, 0xfe, 0x62, 0x2a, 0xbf, 0xfd, 0x6a, 0xf6, 0xae, 0xae, 0xf4, 0x73, 0x16, 0x34, 0x71, 0x43,
-    0xd1, 0x91, 0x0a, 0xa8, 0xbf, 0x4d, 0xa1, 0xe5, 0xb4, 0x1d, 0x1c, 0xd8, 0x1c, 0x0d, 0x7b, 0x54,
-    0x3c, 0xee, 0x49, 0x33, 0x7f, 0xbc, 0xf8, 0xfb, 0xc7, 0xcc, 0x14, 0x5c, 0x97, 0xfc, 0x72, 0xcb,
-    0x0d, 0xdd, 0x06, 0xa6, 0xe8, 0x3b, 0x15, 0xde, 0xe8, 0x6b, 0x64, 0x68, 0x29, 0xfd, 0x1a, 0xa7,
-    0x37, 0x4f, 0x63, 0x79, 0x44, 0x7a, 0x68, 0x26, 0x05, 0x15, 0x7d, 0x03, 0x93, 0xff, 0x6d, 0x7c,
-    0x68, 0x61, 0x50, 0x39, 0xf7, 0xb5, 0x56, 0x63, 0x71, 0x14, 0x74, 0x68, 0x06, 0xa5, 0x3f, 0x55,
-    0x98, 0x3c, 0x7f, 0xb2, 0xd8, 0xd3, 0xa0, 0x8d, 0x3e, 0x07, 0x4d, 0x3c, 0xc8, 0x28, 0xb5, 0x4d,
-    0x5e, 0x78, 0xce, 0x8d, 0xfc, 0x60, 0x60, 0xf8, 0x01, 0x78, 0x70, 0x45, 0x3e, 0x89, 0x28, 0x35,
-    0xc2, 0xc5, 0x17, 0xd7, 0xb8, 0x3d, 0x84, 0x18, 0x6a, 0x52, 0xbe, 0x7b, 0xfc, 0xda, 0x54, 0x5e,
-    0xbd, 0x36, 0x95, 0x67, 0x5d, 0x53, 0x3d, 0xee, 0x9a, 0xea, 0xcb, 0xae, 0xa9, 0xfe, 0xd5, 0x35,
-    0xd5, 0xef, 0x4f, 0x4d, 0xe5, 0xe5, 0xa9, 0xa9, 0xbc, 0x3a, 0x35, 0x95, 0x27, 0xd9, 0x27, 0x5a,
-    0x3d, 0x27, 0x7f, 0x5d, 0xbd, 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x42, 0x09, 0xd0,
-    0x76, 0x0a, 0x00, 0x00,
+    0x14, 0xc7, 0x25, 0x5b, 0x75, 0x92, 0xe7, 0x36, 0x09, 0x5b, 0x12, 0x14, 0xb5, 0x28, 0xae, 0xdb,
+    0x19, 0x9c, 0x90, 0xc8, 0x83, 0xcb, 0x4c, 0x99, 0x42, 0x0f, 0x71, 0xe2, 0x99, 0x98, 0xb4, 0x4e,
+    0x47, 0x49, 0x4a, 0x6f, 0x41, 0x96, 0x36, 0xae, 0xb0, 0xad, 0x35, 0xbb, 0x6b, 0x07, 0x2e, 0x4c,
+    0x8f, 0x90, 0x13, 0x37, 0x18, 0x66, 0x3a, 0x1c, 0xe0, 0xdc, 0x0f, 0xc0, 0x27, 0xc8, 0x70, 0xea,
+    0x8d, 0x9e, 0x32, 0xd4, 0xb9, 0xc3, 0x17, 0xe0, 0xc0, 0xec, 0x4a, 0x4a, 0x42, 0x22, 0x3b, 0x3e,
+    0x70, 0xd2, 0x8e, 0xf7, 0xf7, 0x7f, 0xff, 0xb7, 0xbb, 0x6f, 0xdf, 0x1a, 0x16, 0x1a, 0x3e, 0x7f,
+    0xd6, 0xad, 0x5b, 0x2e, 0x69, 0x17, 0x3d, 0xe2, 0x36, 0x31, 0x2d, 0xb2, 0x7d, 0x87, 0xb6, 0x9b,
+    0x3e, 0x2f, 0x3a, 0x1d, 0xbf, 0x48, 0x9d, 0x3d, 0x6e, 0x75, 0x28, 0xe1, 0x04, 0xa1, 0x70, 0xde,
+    0x8a, 0xe7, 0xad, 0xde, 0x07, 0xc6, 0xd2, 0x25, 0x72, 0x52, 0xff, 0x02, 0xbb, 0x9c, 0x85, 0x11,
+    0x8c, 0xc5, 0x4b, 0x68, 0xfe, 0x75, 0x07, 0xc7, 0xec, 0x42, 0x83, 0x58, 0x98, 0xbb, 0x9e, 0xe5,
+    0x93, 0xa2, 0xf8, 0xca, 0x4c, 0x8a, 0xbd, 0xbb, 0xf2, 0xdb, 0xa9, 0x9f, 0x49, 0xcc, 0x78, 0xbb,
+    0x41, 0x1a, 0x44, 0x0e, 0x8b, 0x62, 0x14, 0xfd, 0x7a, 0x6f, 0x88, 0x99, 0x24, 0xea, 0xdd, 0xbd,
+    0x62, 0xa7, 0xd5, 0x6d, 0xf8, 0x41, 0xf4, 0x09, 0x85, 0xf9, 0x97, 0x2a, 0x80, 0xed, 0xec, 0xf1,
+    0x47, 0xb8, 0x5d, 0xc7, 0x14, 0xdd, 0x86, 0x31, 0xe1, 0xb5, 0xeb, 0x7b, 0xba, 0x9a, 0x53, 0x0b,
+    0x5a, 0x19, 0xfa, 0x47, 0xf3, 0x19, 0x01, 0x54, 0xd7, 0xec, 0x8c, 0x98, 0xaa, 0x7a, 0x02, 0x0a,
+    0x88, 0x87, 0x05, 0x94, 0xca, 0xa9, 0x85, 0x89, 0x10, 0xaa, 0x11, 0x0f, 0x0b, 0x48, 0x4c, 0x55,
+    0x3d, 0x84, 0x40, 0x73, 0x3c, 0x8f, 0xea, 0x69, 0x41, 0xd8, 0x72, 0x8c, 0xca, 0x90, 0x61, 0xdc,
+    0xe1, 0x5d, 0xa6, 0x6b, 0x39, 0xb5, 0x90, 0x2d, 0xdd, 0xb1, 0x2e, 0xee, 0xb2, 0x75, 0x9a, 0xcd,
+    0x96, 0x64, 0xcb, 0xda, 0xe1, 0xd1, 0xbc, 0x62, 0x47, 0xca, 0xfc, 0x2d, 0xc8, 0x7e, 0x4a, 0xfc,
+    0xc0, 0xc6, 0x5f, 0x76, 0x31, 0xe3, 0x27, 0x36, 0xea, 0xa9, 0x4d, 0xfe, 0x27, 0x15, 0xae, 0x86,
+    0x0c, 0xeb, 0x90, 0x80, 0xe1, 0xd1, 0x56, 0xf5, 0x11, 0x8c, 0xb5, 0xa5, 0x2d, 0xd3, 0x53, 0xb9,
+    0x74, 0x21, 0x5b, 0x32, 0x87, 0x67, 0x67, 0xc7, 0x38, 0x7a, 0x1f, 0xa6, 0x28, 0x6e, 0x93, 0x1e,
+    0xf6, 0x76, 0xe3, 0x08, 0xe9, 0x5c, 0xba, 0xa0, 0x95, 0x53, 0xd3, 0x8a, 0x3d, 0x19, 0x4d, 0x85,
+    0x22, 0x96, 0x2f, 0xc3, 0xd5, 0x87, 0xd8, 0xe9, 0xe1, 0x78, 0x01, 0x25, 0xd0, 0xc4, 0x8e, 0xc9,
+    0xc4, 0x2e, 0xf7, 0x94, 0x6c, 0x7e, 0x0a, 0xae, 0x45, 0x31, 0xc2, 0x05, 0xe6, 0x1f, 0xc2, 0xdc,
+    0x63, 0x4a, 0x5c, 0xcc, 0x58, 0xc8, 0x32, 0xe6, 0x34, 0x4e, 0x1c, 0x16, 0xc4, 0xc2, 0xe4, 0x2f,
+    0x91, 0xc9, 0x94, 0x15, 0x96, 0x95, 0x15, 0x83, 0xf1, 0xfc, 0x7d, 0xed, 0xf9, 0x0f, 0x79, 0x25,
+    0x7f, 0x13, 0x8c, 0xa4, 0x68, 0x91, 0xd7, 0x06, 0xe8, 0x5b, 0x9c, 0x62, 0xa7, 0xfd, 0x7f, 0x58,
+    0xdd, 0x80, 0xb9, 0x84, 0x60, 0x91, 0xd3, 0x27, 0x30, 0x63, 0x63, 0x46, 0x5a, 0x3d, 0xbc, 0xe2,
+    0x79, 0x54, 0xa4, 0x13, 0xd9, 0x8c, 0x72, 0x9e, 0xf9, 0x25, 0x98, 0x3d, 0xaf, 0x8e, 0xca, 0x21,
+    0xa9, 0x66, 0x5a, 0x70, 0xbd, 0x1a, 0x70, 0x4c, 0x03, 0xa7, 0x25, 0xe2, 0xc4, 0x4e, 0xb3, 0x90,
+    0x3a, 0x31, 0xc9, 0xf4, 0x8f, 0xe6, 0x53, 0xd5, 0x35, 0x3b, 0xe5, 0x7b, 0xe8, 0x01, 0x64, 0x1c,
+    0x97, 0xfb, 0x24, 0x88, 0x6a, 0x65, 0x3e, 0xe9, 0xdc, 0xb6, 0x38, 0xa1, 0x78, 0x45, 0x62, 0x71,
+    0x11, 0x87, 0xa2, 0xfc, 0x3f, 0x1a, 0x64, 0xcf, 0xcc, 0xa2, 0x8f, 0x4f, 0xc2, 0x09, 0xab, 0xc9,
+    0xd2, 0xed, 0x4b, 0xc2, 0x6d, 0xf8, 0x81, 0x17, 0x07, 0x43, 0x56, 0x54, 0x41, 0x29, 0xb9, 0xe3,
+    0x7a, 0x92, 0x54, 0xdc, 0xcd, 0x75, 0x25, 0xac, 0x1e, 0x74, 0x0f, 0xc6, 0x18, 0xa6, 0x3d, 0xdf,
+    0xc5, 0xf2, 0x72, 0x66, 0x4b, 0x37, 0x12, 0xdd, 0x42, 0x64, 0x5d, 0xb1, 0x63, 0x5a, 0x18, 0x71,
+    0x87, 0x35, 0xa3, 0xcb, 0x9b, 0x68, 0xb4, 0xed, 0xb0, 0xa6, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30,
+    0xdf, 0x27, 0xb4, 0xa9, 0x5f, 0x19, 0x6c, 0x54, 0x0b, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0xb6,
+    0xba, 0x8c, 0x63, 0xaa, 0x67, 0x06, 0x0b, 0x57, 0x43, 0x44, 0x08, 0x23, 0x1a, 0x7d, 0x08, 0x19,
+    0x86, 0x5d, 0x8a, 0xb9, 0x3e, 0x26, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2e, 0x5a, 0x8a, 0x1c,
+    0xa1, 0xfb, 0x30, 0x4e, 0x31, 0x23, 0x5d, 0xea, 0x62, 0x7d, 0x5c, 0xea, 0x6e, 0x26, 0x5e, 0xc3,
+    0x88, 0x59, 0x57, 0xec, 0x13, 0x1e, 0x3d, 0x80, 0x09, 0xfc, 0x15, 0xc7, 0x01, 0x13, 0x87, 0x37,
+    0x21, 0xc5, 0xef, 0x26, 0x89, 0x2b, 0x31, 0xb4, 0xae, 0xd8, 0xa7, 0x0a, 0x91, 0xb0, 0x4b, 0x82,
+    0x3d, 0xbf, 0xa1, 0xc3, 0xe0, 0x84, 0x57, 0x25, 0x21, 0x12, 0x0e, 0x59, 0xa1, 0xea, 0x91, 0x56,
+    0xb7, 0x8d, 0xf5, 0xec, 0x60, 0xd5, 0x13, 0x49, 0x08, 0x55, 0xc8, 0x96, 0xc7, 0x21, 0xc3, 0x1d,
+    0xda, 0xc0, 0x7c, 0xf1, 0x6f, 0x15, 0xa6, 0xce, 0x55, 0x13, 0x7a, 0x0f, 0xc6, 0x76, 0x6a, 0x1b,
+    0xb5, 0xcd, 0xcf, 0x6a, 0xd3, 0x8a, 0x61, 0x1c, 0xbc, 0xc8, 0xcd, 0x9e, 0x23, 0x76, 0x82, 0x66,
+    0x40, 0xf6, 0x03, 0x54, 0x82, 0xeb, 0x5b, 0xdb, 0x9b, 0x76, 0x65, 0x77, 0x65, 0x75, 0xbb, 0xba,
+    0x59, 0xdb, 0x5d, 0xb5, 0x2b, 0x2b, 0xdb, 0x95, 0x69, 0xd5, 0x98, 0x3b, 0x78, 0x91, 0x9b, 0x39,
+    0x27, 0x5a, 0xa5, 0xd8, 0xe1, 0xf8, 0x82, 0x66, 0xe7, 0xf1, 0x9a, 0xd0, 0xa4, 0x12, 0x35, 0x3b,
+    0x1d, 0x2f, 0x49, 0x63, 0x57, 0x1e, 0x6d, 0x3e, 0xa9, 0x4c, 0xa7, 0x13, 0x35, 0xb6, 0x6c, 0xb2,
+    0xc6, 0x3b, 0xdf, 0xfe, 0x62, 0x2a, 0xbf, 0xfd, 0x6a, 0x9e, 0x5f, 0x5d, 0xe9, 0xe7, 0x34, 0x68,
+    0xe2, 0x5e, 0xa3, 0x03, 0x15, 0xd0, 0xc5, 0xe6, 0x86, 0x96, 0x93, 0x76, 0x70, 0x60, 0x4b, 0x35,
+    0xac, 0x51, 0xf1, 0xa8, 0x93, 0xcd, 0xfc, 0xfe, 0xf2, 0xaf, 0x1f, 0x53, 0x53, 0x70, 0x4d, 0xf2,
+    0xcb, 0x6d, 0x27, 0x70, 0x1a, 0x98, 0xa2, 0xef, 0x54, 0x78, 0xeb, 0x42, 0xfb, 0x43, 0x4b, 0xc9,
+    0x97, 0x3f, 0xb9, 0xe5, 0x1a, 0xcb, 0x23, 0xd2, 0x43, 0x33, 0x29, 0xa8, 0xe8, 0x1b, 0x98, 0xfc,
+    0x6f, 0xbb, 0x44, 0x0b, 0x83, 0x2e, 0xc1, 0x85, 0x86, 0x6c, 0x2c, 0x8e, 0x82, 0x0e, 0xcd, 0xa0,
+    0xf4, 0x87, 0x0a, 0x93, 0xa7, 0x0f, 0x1d, 0x7b, 0xe6, 0x77, 0xd0, 0xe7, 0xa0, 0x89, 0x67, 0x1c,
+    0x25, 0x36, 0xd7, 0x33, 0x7f, 0x02, 0x8c, 0xdc, 0x60, 0x60, 0xf8, 0x01, 0xb8, 0x70, 0x45, 0x3e,
+    0xa4, 0x28, 0x31, 0xc2, 0xd9, 0x77, 0xda, 0xb8, 0x35, 0x84, 0x18, 0x6a, 0x52, 0xbe, 0x73, 0xf8,
+    0xc6, 0x54, 0x5e, 0xbf, 0x31, 0x95, 0xe7, 0x7d, 0x53, 0x3d, 0xec, 0x9b, 0xea, 0xab, 0xbe, 0xa9,
+    0xfe, 0xd9, 0x37, 0xd5, 0xef, 0x8f, 0x4d, 0xe5, 0xd5, 0xb1, 0xa9, 0xbc, 0x3e, 0x36, 0x95, 0xa7,
+    0xe9, 0xa7, 0x5a, 0x3d, 0x23, 0xff, 0x93, 0xdd, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x56,
+    0x23, 0xf6, 0xa8, 0x0a, 0x00, 0x00,
 }
 
 type authenticatedWrapperRaftServer struct {
@@ -1079,6 +1093,12 @@ func (m *StoreAction) CopyFrom(src interface{}) {
         }
         github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig())
         m.Target = &v
+    case *StoreAction_Volume:
+        v := StoreAction_Volume{
+            Volume: &Volume{},
+        }
+        github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume())
+        m.Target = &v
     }
 }
@@ -2030,6 +2050,27 @@ func (m *StoreAction_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     }
     return len(dAtA) - i, nil
 }
+func (m *StoreAction_Volume) MarshalTo(dAtA []byte) (int, error) {
+    size := m.Size()
+    return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StoreAction_Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+    i := len(dAtA)
+    if m.Volume != nil {
+        {
+            size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+            if err != nil {
+                return 0, err
+            }
+            i -= size
+            i = encodeVarintRaft(dAtA, i, uint64(size))
+        }
+        i--
+        dAtA[i] = 0x5a
+    }
+    return len(dAtA) - i, nil
+}
 func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
     offset -= sovRaft(v)
     base := offset
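The literal 0x5a written by the new marshaler is the protobuf key for the volume field: a key byte is (field_number << 3) | wire_type, and field 11 with wire type 2 (length-delimited) encodes to 0x5a. A one-line check:

package main

import "fmt"

func main() {
	const lengthDelimited = 2 // protobuf wire type for embedded messages
	fmt.Printf("%#x\n", 11<<3|lengthDelimited) // 0x5a: StoreAction.volume (field 11)
	fmt.Printf("%#x\n", 10<<3|lengthDelimited) // 0x52: StoreSnapshot.volumes (field 10)
}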
@@ -2673,6 +2714,18 @@ func (m *StoreAction_Config) Size() (n int) {
     }
     return n
 }
+func (m *StoreAction_Volume) Size() (n int) {
+    if m == nil {
+        return 0
+    }
+    var l int
+    _ = l
+    if m.Volume != nil {
+        l = m.Volume.Size()
+        n += 1 + l + sovRaft(uint64(l))
+    }
+    return n
+}
 
 func sovRaft(x uint64) (n int) {
     return (math_bits.Len64(x|1) + 6) / 7
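sovRaft, used above to size the length prefix, counts varint bytes: a varint stores 7 payload bits per byte, so the byte count is ceil(bitlen/7), with x|1 making zero occupy one byte. An equivalent standalone helper:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the generated sovRaft/sovSnapshot helpers: the number of
// bytes needed to varint-encode x.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	fmt.Println(sov(0), sov(127), sov(128)) // 1 1 2
}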
@@ -2914,6 +2967,16 @@ func (this *StoreAction_Config) String() string {
     }, "")
     return s
 }
+func (this *StoreAction_Volume) String() string {
+    if this == nil {
+        return "nil"
+    }
+    s := strings.Join([]string{`&StoreAction_Volume{`,
+        `Volume:` + strings.Replace(fmt.Sprintf("%v", this.Volume), "Volume", "Volume", 1) + `,`,
+        `}`,
+    }, "")
+    return s
+}
 func valueToStringRaft(v interface{}) string {
     rv := reflect.ValueOf(v)
     if rv.IsNil() {
@@ -4374,6 +4437,41 @@ func (m *StoreAction) Unmarshal(dAtA []byte) error {
             }
             m.Target = &StoreAction_Config{v}
             iNdEx = postIndex
+        case 11:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+            }
+            var msglen int
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowRaft
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                msglen |= int(b&0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            if msglen < 0 {
+                return ErrInvalidLengthRaft
+            }
+            postIndex := iNdEx + msglen
+            if postIndex < 0 {
+                return ErrInvalidLengthRaft
+            }
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            v := &Volume{}
+            if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+                return err
+            }
+            m.Target = &StoreAction_Volume{v}
+            iNdEx = postIndex
         default:
             iNdEx = preIndex
             skippy, err := skipRaft(dAtA[iNdEx:])
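The shift loop in the new case 11 is the standard protobuf varint decoder: each byte contributes its low 7 bits, least-significant group first, and a clear high bit marks the final byte. A standalone sketch of the same loop:

package main

import "fmt"

// decodeVarint returns the decoded value and the number of bytes consumed.
// Error handling (overflow, truncated input) is elided for brevity.
func decodeVarint(data []byte) (value int, n int) {
	for shift := uint(0); ; shift += 7 {
		b := data[n]
		n++
		value |= int(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte
			return value, n
		}
	}
}

func main() {
	v, n := decodeVarint([]byte{0xac, 0x02})
	fmt.Println(v, n) // 300 2
}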
@@ -4,7 +4,7 @@ package docker.swarmkit.v1;
 
 import "github.com/docker/swarmkit/api/objects.proto";
 import "github.com/docker/swarmkit/api/types.proto";
-import "github.com/coreos/etcd/raft/raftpb/raft.proto";
+import "go.etcd.io/etcd/raft/v3/raftpb/raft.proto";
 import weak "gogoproto/gogo.proto";
 import weak "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
@@ -146,5 +146,6 @@ message StoreAction {
         Resource resource = 8;
         Extension extension = 9;
         Config config = 10;
+        Volume volume = 11;
     }
 }
@@ -59,6 +59,7 @@ type StoreSnapshot struct {
     Resources  []*Resource  `protobuf:"bytes,7,rep,name=resources,proto3" json:"resources,omitempty"`
     Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions,proto3" json:"extensions,omitempty"`
     Configs    []*Config    `protobuf:"bytes,9,rep,name=configs,proto3" json:"configs,omitempty"`
+    Volumes    []*Volume    `protobuf:"bytes,10,rep,name=volumes,proto3" json:"volumes,omitempty"`
 }
 
 func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} }
@@ -181,39 +182,40 @@ func init() {
 }
 
 var fileDescriptor_c5cad0b62cecd9af = []byte{
-    // 507 bytes of a gzipped FileDescriptorProto
-    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x6f, 0xd3, 0x30,
-    0x14, 0xc7, 0x93, 0xfe, 0x4a, 0xe7, 0x09, 0x18, 0x16, 0x07, 0xab, 0x0c, 0x53, 0xca, 0x0e, 0x45,
-    0x82, 0x14, 0x0a, 0x12, 0x08, 0x69, 0x1c, 0x3a, 0x71, 0xe0, 0xc0, 0x0e, 0x2e, 0x9a, 0xb8, 0xa6,
-    0xa9, 0xdb, 0x86, 0x90, 0xb8, 0xf2, 0x73, 0x3b, 0x8e, 0xf0, 0x1f, 0xf0, 0x67, 0xf5, 0xb8, 0xe3,
-    0x4e, 0x88, 0xb5, 0x07, 0xfe, 0x0d, 0x64, 0x3b, 0x09, 0x95, 0x48, 0xb7, 0x5b, 0x64, 0x7d, 0x3e,
-    0xef, 0x7d, 0xed, 0xbc, 0x87, 0x9e, 0x4d, 0x23, 0x35, 0x5b, 0x8c, 0xfc, 0x50, 0x24, 0xbd, 0xb1,
-    0x08, 0x63, 0x2e, 0x7b, 0x70, 0x1e, 0xc8, 0x24, 0x8e, 0x54, 0x2f, 0x98, 0x47, 0x3d, 0x48, 0x83,
-    0x39, 0xcc, 0x84, 0xf2, 0xe7, 0x52, 0x28, 0x81, 0xb1, 0x65, 0xfc, 0x9c, 0xf1, 0x97, 0x2f, 0x5a,
-    0x4f, 0x6f, 0x28, 0x21, 0x46, 0x5f, 0x78, 0xa8, 0xc0, 0x56, 0x68, 0x3d, 0xb9, 0x81, 0x96, 0xc1,
-    0x24, 0x6b, 0xd6, 0xba, 0x37, 0x15, 0x53, 0x61, 0x3e, 0x7b, 0xfa, 0xcb, 0x9e, 0x76, 0x7e, 0xd4,
-    0xd0, 0xad, 0xa1, 0x12, 0x92, 0x0f, 0xb3, 0x68, 0xd8, 0x47, 0xf5, 0x54, 0x8c, 0x39, 0x10, 0xb7,
-    0x5d, 0xed, 0xee, 0xf7, 0x89, 0xff, 0x7f, 0x48, 0xff, 0x54, 0x8c, 0x39, 0xb3, 0x18, 0x7e, 0x8d,
-    0x9a, 0xc0, 0xe5, 0x32, 0x0a, 0x39, 0x90, 0x8a, 0x51, 0xee, 0x97, 0x29, 0x43, 0xcb, 0xb0, 0x02,
-    0xd6, 0x62, 0xca, 0xd5, 0xb9, 0x90, 0x31, 0x90, 0xea, 0x6e, 0xf1, 0xd4, 0x32, 0xac, 0x80, 0x75,
-    0x42, 0x15, 0x40, 0x0c, 0xa4, 0xb6, 0x3b, 0xe1, 0xa7, 0x00, 0x62, 0x66, 0x31, 0xdd, 0x28, 0xfc,
-    0xba, 0x00, 0xc5, 0x25, 0x90, 0xfa, 0xee, 0x46, 0x27, 0x96, 0x61, 0x05, 0x8c, 0x5f, 0x21, 0x0f,
-    0x78, 0x28, 0xb9, 0x02, 0xd2, 0x30, 0x5e, 0xab, 0xfc, 0x66, 0x1a, 0x61, 0x39, 0x8a, 0xdf, 0xa2,
-    0x3d, 0xc9, 0x41, 0x2c, 0xa4, 0x7e, 0x11, 0xcf, 0x78, 0x87, 0x65, 0x1e, 0xcb, 0x20, 0xf6, 0x0f,
-    0xc7, 0xc7, 0x08, 0xf1, 0x6f, 0x8a, 0xa7, 0x10, 0x89, 0x14, 0x48, 0xd3, 0xc8, 0x0f, 0xca, 0xe4,
-    0xf7, 0x39, 0xc5, 0xb6, 0x04, 0x1d, 0x38, 0x14, 0xe9, 0x24, 0x9a, 0x02, 0xd9, 0xdb, 0x1d, 0xf8,
-    0xc4, 0x20, 0x2c, 0x47, 0x3b, 0x11, 0xba, 0x93, 0xdd, 0xbd, 0x18, 0x82, 0x37, 0xc8, 0x4b, 0x78,
-    0x32, 0xd2, 0x2f, 0x66, 0xc7, 0x80, 0x96, 0xde, 0x20, 0x98, 0xa8, 0x8f, 0x06, 0x63, 0x39, 0x8e,
-    0x0f, 0x91, 0x27, 0x79, 0x22, 0x96, 0x7c, 0x6c, 0xa6, 0xa1, 0x36, 0xa8, 0x1c, 0x38, 0x2c, 0x3f,
-    0xea, 0xfc, 0x71, 0x51, 0xb3, 0x68, 0xf2, 0x0e, 0x79, 0x4b, 0x2e, 0x75, 0x72, 0xe2, 0xb6, 0xdd,
-    0xee, 0xed, 0xfe, 0x51, 0xe9, 0xf3, 0xe6, 0x3b, 0x73, 0x66, 0x59, 0x96, 0x4b, 0xf8, 0x03, 0x42,
-    0x59, 0xd7, 0x59, 0x34, 0x27, 0x95, 0xb6, 0xdb, 0xdd, 0xef, 0x3f, 0xbe, 0xe6, 0xcf, 0xe6, 0x95,
-    0x06, 0xb5, 0xd5, 0xaf, 0x87, 0x0e, 0xdb, 0x92, 0xf1, 0x31, 0xaa, 0x83, 0xde, 0x02, 0x52, 0x35,
-    0x55, 0x1e, 0x95, 0x06, 0xd9, 0x5e, 0x93, 0xac, 0x86, 0xb5, 0x3a, 0x77, 0x91, 0x97, 0xa5, 0xc3,
-    0x0d, 0x54, 0x39, 0x7b, 0x7e, 0xe0, 0x0c, 0x8e, 0x56, 0x57, 0xd4, 0xb9, 0xbc, 0xa2, 0xce, 0xf7,
-    0x35, 0x75, 0x57, 0x6b, 0xea, 0x5e, 0xac, 0xa9, 0xfb, 0x7b, 0x4d, 0xdd, 0x9f, 0x1b, 0xea, 0x5c,
-    0x6c, 0xa8, 0x73, 0xb9, 0xa1, 0xce, 0xe7, 0xca, 0xa8, 0x61, 0xf6, 0xf0, 0xe5, 0xdf, 0x00, 0x00,
-    0x00, 0xff, 0xff, 0x97, 0x4e, 0xfd, 0x2a, 0x3b, 0x04, 0x00, 0x00,
+    // 523 bytes of a gzipped FileDescriptorProto
+    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0x41, 0x8f, 0x12, 0x31,
+    0x14, 0xc7, 0x67, 0x58, 0x60, 0xd8, 0x6e, 0xd4, 0xb5, 0xf1, 0xd0, 0xe0, 0x5a, 0x11, 0xf7, 0x80,
+    0x89, 0x0e, 0x8a, 0x26, 0x1a, 0x93, 0xf5, 0xc0, 0xc6, 0x83, 0x07, 0xf7, 0x50, 0x0c, 0xf1, 0x3a,
+    0x0c, 0x05, 0x46, 0x76, 0xa6, 0xa4, 0xaf, 0xb0, 0x1e, 0xfd, 0x08, 0x7e, 0x1b, 0xbf, 0x02, 0xc7,
+    0x3d, 0xee, 0xc9, 0xb8, 0x70, 0xf0, 0x6b, 0x98, 0xb6, 0x53, 0x24, 0x71, 0x70, 0x6f, 0x93, 0xe6,
+    0xf7, 0x7b, 0xef, 0xdf, 0xce, 0x7b, 0xe8, 0xd9, 0x38, 0x51, 0x93, 0xf9, 0x20, 0x8c, 0x45, 0xda,
+    0x1e, 0x8a, 0x78, 0xca, 0x65, 0x1b, 0x2e, 0x22, 0x99, 0x4e, 0x13, 0xd5, 0x8e, 0x66, 0x49, 0x1b,
+    0xb2, 0x68, 0x06, 0x13, 0xa1, 0xc2, 0x99, 0x14, 0x4a, 0x60, 0x6c, 0x99, 0xd0, 0x31, 0xe1, 0xe2,
+    0x45, 0xfd, 0xe9, 0x0d, 0x25, 0xc4, 0xe0, 0x0b, 0x8f, 0x15, 0xd8, 0x0a, 0xf5, 0x27, 0x37, 0xd0,
+    0x32, 0x1a, 0xe5, 0xcd, 0xea, 0xf7, 0xc6, 0x62, 0x2c, 0xcc, 0x67, 0x5b, 0x7f, 0xd9, 0xd3, 0xe6,
+    0x8f, 0x32, 0xba, 0xd5, 0x53, 0x42, 0xf2, 0x5e, 0x1e, 0x0d, 0x87, 0xa8, 0x92, 0x89, 0x21, 0x07,
+    0xe2, 0x37, 0xf6, 0x5a, 0x07, 0x1d, 0x12, 0xfe, 0x1b, 0x32, 0x3c, 0x13, 0x43, 0xce, 0x2c, 0x86,
+    0x5f, 0xa3, 0x1a, 0x70, 0xb9, 0x48, 0x62, 0x0e, 0xa4, 0x64, 0x94, 0xfb, 0x45, 0x4a, 0xcf, 0x32,
+    0x6c, 0x03, 0x6b, 0x31, 0xe3, 0xea, 0x42, 0xc8, 0x29, 0x90, 0xbd, 0xdd, 0xe2, 0x99, 0x65, 0xd8,
+    0x06, 0xd6, 0x09, 0x55, 0x04, 0x53, 0x20, 0xe5, 0xdd, 0x09, 0x3f, 0x45, 0x30, 0x65, 0x16, 0xd3,
+    0x8d, 0xe2, 0xf3, 0x39, 0x28, 0x2e, 0x81, 0x54, 0x76, 0x37, 0x3a, 0xb5, 0x0c, 0xdb, 0xc0, 0xf8,
+    0x15, 0x0a, 0x80, 0xc7, 0x92, 0x2b, 0x20, 0x55, 0xe3, 0xd5, 0x8b, 0x6f, 0xa6, 0x11, 0xe6, 0x50,
+    0xfc, 0x16, 0xed, 0x4b, 0x0e, 0x62, 0x2e, 0xf5, 0x8b, 0x04, 0xc6, 0x3b, 0x2a, 0xf2, 0x58, 0x0e,
+    0xb1, 0xbf, 0x38, 0x3e, 0x41, 0x88, 0x7f, 0x55, 0x3c, 0x83, 0x44, 0x64, 0x40, 0x6a, 0x46, 0x7e,
+    0x50, 0x24, 0xbf, 0x77, 0x14, 0xdb, 0x12, 0x74, 0xe0, 0x58, 0x64, 0xa3, 0x64, 0x0c, 0x64, 0x7f,
+    0x77, 0xe0, 0x53, 0x83, 0x30, 0x87, 0x6a, 0x6b, 0x21, 0xce, 0xe7, 0x29, 0x07, 0x82, 0x76, 0x5b,
+    0x7d, 0x83, 0x30, 0x87, 0x36, 0x13, 0x74, 0x27, 0x7f, 0xb1, 0xcd, 0xe8, 0xbc, 0x41, 0x41, 0xca,
+    0xd3, 0x81, 0x7e, 0x67, 0x3b, 0x3c, 0xb4, 0xf0, 0xde, 0xd1, 0x48, 0x7d, 0x34, 0x18, 0x73, 0x38,
+    0x3e, 0x42, 0x81, 0xe4, 0xa9, 0x58, 0xf0, 0xa1, 0x99, 0xa1, 0x72, 0xb7, 0x74, 0xe8, 0x31, 0x77,
+    0xd4, 0xfc, 0xed, 0xa3, 0xda, 0xa6, 0xc9, 0x3b, 0x14, 0x2c, 0xb8, 0xd4, 0xf7, 0x25, 0x7e, 0xc3,
+    0x6f, 0xdd, 0xee, 0x1c, 0x17, 0xfe, 0x14, 0xb7, 0x69, 0x7d, 0xcb, 0x32, 0x27, 0xe1, 0x0f, 0x08,
+    0xe5, 0x5d, 0x27, 0xc9, 0x8c, 0x94, 0x1a, 0x7e, 0xeb, 0xa0, 0xf3, 0xf8, 0x3f, 0xf3, 0xe0, 0x2a,
+    0x75, 0xcb, 0xcb, 0x9f, 0x0f, 0x3d, 0xb6, 0x25, 0xe3, 0x13, 0x54, 0x01, 0xbd, 0x3b, 0x64, 0xcf,
+    0x54, 0x79, 0x54, 0x18, 0x64, 0x7b, 0xb9, 0xf2, 0x1a, 0xd6, 0x6a, 0xde, 0x45, 0x41, 0x9e, 0x0e,
+    0x57, 0x51, 0xa9, 0xff, 0xfc, 0xd0, 0xeb, 0x1e, 0x2f, 0xaf, 0xa9, 0x77, 0x75, 0x4d, 0xbd, 0x6f,
+    0x2b, 0xea, 0x2f, 0x57, 0xd4, 0xbf, 0x5c, 0x51, 0xff, 0xd7, 0x8a, 0xfa, 0xdf, 0xd7, 0xd4, 0xbb,
+    0x5c, 0x53, 0xef, 0x6a, 0x4d, 0xbd, 0xcf, 0xa5, 0x41, 0xd5, 0x6c, 0xef, 0xcb, 0x3f, 0x01, 0x00,
+    0x00, 0xff, 0xff, 0x27, 0xb3, 0xad, 0x75, 0x71, 0x04, 0x00, 0x00,
 }
 
 func (m *StoreSnapshot) Copy() *StoreSnapshot {
@@ -301,6 +303,14 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) {
         }
     }
 
+    if o.Volumes != nil {
+        m.Volumes = make([]*Volume, len(o.Volumes))
+        for i := range m.Volumes {
+            m.Volumes[i] = &Volume{}
+            github_com_docker_swarmkit_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
+        }
+    }
+
 }
 
 func (m *ClusterSnapshot) Copy() *ClusterSnapshot {
@@ -368,6 +378,20 @@ func (m *StoreSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     _ = i
     var l int
     _ = l
+    if len(m.Volumes) > 0 {
+        for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+            {
+                size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+                if err != nil {
+                    return 0, err
+                }
+                i -= size
+                i = encodeVarintSnapshot(dAtA, i, uint64(size))
+            }
+            i--
+            dAtA[i] = 0x52
+        }
+    }
     if len(m.Configs) > 0 {
         for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- {
             {
@@ -660,6 +684,12 @@ func (m *StoreSnapshot) Size() (n int) {
             n += 1 + l + sovSnapshot(uint64(l))
         }
     }
+    if len(m.Volumes) > 0 {
+        for _, e := range m.Volumes {
+            l = e.Size()
+            n += 1 + l + sovSnapshot(uint64(l))
+        }
+    }
     return n
 }
@@ -754,6 +784,11 @@ func (this *StoreSnapshot) String() string {
         repeatedStringForConfigs += strings.Replace(fmt.Sprintf("%v", f), "Config", "Config", 1) + ","
     }
     repeatedStringForConfigs += "}"
+    repeatedStringForVolumes := "[]*Volume{"
+    for _, f := range this.Volumes {
+        repeatedStringForVolumes += strings.Replace(fmt.Sprintf("%v", f), "Volume", "Volume", 1) + ","
+    }
+    repeatedStringForVolumes += "}"
     s := strings.Join([]string{`&StoreSnapshot{`,
         `Nodes:` + repeatedStringForNodes + `,`,
         `Services:` + repeatedStringForServices + `,`,
@@ -764,6 +799,7 @@ func (this *StoreSnapshot) String() string {
         `Resources:` + repeatedStringForResources + `,`,
         `Extensions:` + repeatedStringForExtensions + `,`,
         `Configs:` + repeatedStringForConfigs + `,`,
+        `Volumes:` + repeatedStringForVolumes + `,`,
         `}`,
     }, "")
     return s
@@ -1139,6 +1175,40 @@ func (m *StoreSnapshot) Unmarshal(dAtA []byte) error {
                 return err
             }
             iNdEx = postIndex
+        case 10:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+            }
+            var msglen int
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowSnapshot
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                msglen |= int(b&0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            if msglen < 0 {
+                return ErrInvalidLengthSnapshot
+            }
+            postIndex := iNdEx + msglen
+            if postIndex < 0 {
+                return ErrInvalidLengthSnapshot
+            }
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            m.Volumes = append(m.Volumes, &Volume{})
+            if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+                return err
+            }
+            iNdEx = postIndex
         default:
             iNdEx = preIndex
             skippy, err := skipSnapshot(dAtA[iNdEx:])
@@ -23,6 +23,7 @@ message StoreSnapshot {
     repeated Resource resources = 7;
     repeated Extension extensions = 8;
     repeated Config configs = 9;
+    repeated Volume volumes = 10;
 }
 
 // ClusterSnapshot stores cluster membership information in snapshots.
File diff suppressed because it is too large
@@ -506,3 +506,71 @@ message ConfigSpec {
     // - golang: Go templating
     Driver templating = 3;
 }
+
+message VolumeSpec {
+    // Annotations includes the name and labels of a volume. The name used in the
+    // spec's Annotations will be passed to the Plugin as the "Name" in the
+    // CreateVolume request.
+    Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+    // Group defines the volume group this particular volume belongs to. When
+    // requesting volumes for a workload, the group name can be used instead of
+    // the volume's name, which tells swarmkit to pick one from the many volumes
+    // belonging to that group.
+    string group = 2;
+
+    // Driver represents the CSI Plugin object and its configuration parameters.
+    // The "options" field of the Driver object is passed in the CSI
+    // CreateVolumeRequest as the "parameters" field. The Driver must be
+    // specified; there is no default CSI Plugin.
+    Driver driver = 3;
+
+    // AccessMode is similar to, and used to determine, the volume access mode as
+    // defined in the CSI spec, as well as the volume type (block vs mount). In
+    // this way, it is more similar to the VolumeCapability message in the CSI
+    // spec.
+    VolumeAccessMode access_mode = 4;
+
+    // Secrets represents a set of key/value pairs to pass to the CSI plugin. The
+    // keys of the secrets can be anything, but the values refer to swarmkit
+    // Secret objects. See the "Secrets Requirements" section of the CSI Plugin
+    // Spec for more information.
+    repeated VolumeSecret secrets = 5;
+
+    // AccessibilityRequirements specifies where a volume must be accessible
+    // from.
+    //
+    // This field must be empty if the plugin does not support
+    // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
+    // plugin does not support it, volume will not be created.
+    //
+    // If AccessibilityRequirements is empty, but the plugin does support
+    // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
+    // cluster is a valid target for the volume.
+    TopologyRequirement AccessibilityRequirements = 6;
+
+    // CapacityRange is the capacity this volume should be created with. If nil,
+    // the plugin will decide the capacity.
+    CapacityRange capacity_range = 7;
+
+    enum VolumeAvailability {
+        option (gogoproto.goproto_enum_prefix) = false;
+
+        // Active allows a volume to be used and scheduled to. This is the
+        // default state.
+        ACTIVE = 0 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityActive"];
+
+        // Pause prevents volumes from having new workloads scheduled to use
+        // them, even if they're already published on a Node.
+        PAUSE = 1 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityPause"];
+
+        // Drain causes existing workloads using this volume to be rescheduled,
+        // causing the volume to be unpublished and removed from nodes.
+        DRAIN = 2 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityDrain"];
+    }
+
+    // Availability is the Volume's desired availability. Analogous to Node
+    // Availability, this allows the user to take volumes offline in order to
+    // update or delete them.
+    VolumeAvailability availability = 8;
+}
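Putting the pieces together, an illustrative VolumeSpec as it would be built against the generated Go types: a mount-type, single-node, read-only volume. The name, group, and driver are placeholders, and the constant and type names assume the enumvalue_customname options shown above and the usual gogo/protobuf codegen for nested messages and oneofs.

package main

import "github.com/docker/swarmkit/api"

// exampleSpec sketches a complete VolumeSpec; every concrete value here is
// illustrative, not a default.
func exampleSpec() *api.VolumeSpec {
	return &api.VolumeSpec{
		Annotations: api.Annotations{Name: "example-volume"},
		Group:       "example-group",
		Driver:      &api.Driver{Name: "example.csi.driver"},
		AccessMode: &api.VolumeAccessMode{
			Scope:   api.VolumeScopeSingleNode,
			Sharing: api.VolumeSharingReadOnly,
			AccessType: &api.VolumeAccessMode_Mount{
				Mount: &api.VolumeAccessMode_MountVolume{FsType: "ext4"},
			},
		},
		Availability: api.VolumeAvailabilityActive,
	}
}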
File diff suppressed because it is too large
@@ -62,6 +62,7 @@ enum ResourceType {
     TASK = 0;
     SECRET = 1;
     CONFIG = 2;
+    VOLUME = 3;
 }
 
 message Resources {
|
||||||
|
|
||||||
// FIPS indicates whether the node has FIPS-enabled
|
// FIPS indicates whether the node has FIPS-enabled
|
||||||
bool fips = 6 [(gogoproto.customname) = "FIPS"];
|
bool fips = 6 [(gogoproto.customname) = "FIPS"];
|
||||||
|
|
||||||
|
repeated NodeCSIInfo csi_info = 7 [(gogoproto.customname) = "CSIInfo"];
|
||||||
}
|
}
|
||||||
|
|
||||||
message NodeTLSInfo {
|
message NodeTLSInfo {
|
||||||
|
@ -151,6 +154,27 @@ message NodeTLSInfo {
|
||||||
bytes cert_issuer_public_key = 3;
|
bytes cert_issuer_public_key = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NodeCSIInfo represents information about a Node returned by calling the
|
||||||
|
// NodeGetInfo RPC on the CSI plugin present on the node. There is a separate
|
||||||
|
// NodeCSIInfo object for each CSI plugin present.
|
||||||
|
message NodeCSIInfo {
|
||||||
|
|
||||||
|
// PluginName is the name of the CSI plugin.
|
||||||
|
string plugin_name = 1;
|
||||||
|
|
||||||
|
// NodeID is the ID of the node as reported by the CSI plugin. This will be
|
||||||
|
// different from the swarmkit node ID.
|
||||||
|
string node_id = 2;
|
||||||
|
|
||||||
|
// MaxVolumesPerNode is the maximum number of volumes that may be published
|
||||||
|
// to this node.
|
||||||
|
int64 max_volumes_per_node = 3;
|
||||||
|
|
||||||
|
// AccessibleTopology indicates the location of this node in the CSI plugin's
|
||||||
|
// topology
|
||||||
|
Topology accessible_topology = 4;
|
||||||
|
}
|
||||||
|
|
||||||
message RaftMemberStatus {
|
message RaftMemberStatus {
|
||||||
bool leader = 1;
|
bool leader = 1;
|
||||||
|
|
||||||
|
@@ -215,6 +239,7 @@ message Mount {
         VOLUME = 1 [(gogoproto.enumvalue_customname) = "MountTypeVolume"]; // Remote storage volumes
         TMPFS = 2 [(gogoproto.enumvalue_customname) = "MountTypeTmpfs"]; // Mount a tmpfs
         NPIPE = 3 [(gogoproto.enumvalue_customname) = "MountTypeNamedPipe"]; // Windows named pipes
+        CSI = 4 [(gogoproto.enumvalue_customname) = "MountTypeCSI"]; // CSI volume
     }
 
     // Type defines the nature of the mount.
@@ -222,6 +247,10 @@ message Mount {
 
     // Source specifies the name of the mount. Depending on mount type, this
     // may be a volume name or a host path, or even ignored.
+    //
+    // For CSI type mounts, the source is either the name of the volume or the
+    // name of the volume group. To specify a volume group, the source should be
+    // prefixed with "group:", as in "group:groupname"
     string source = 2;
 
     // Target path in container
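A sketch of a CSI mount using the new mount type and the "group:" source prefix described above; the group name and target path are illustrative:

package main

import "github.com/docker/swarmkit/api"

// csiMount requests any volume from the named volume group and mounts it at
// the given target path in the container.
func csiMount() api.Mount {
	return api.Mount{
		Type:   api.MountTypeCSI,
		Source: "group:example-group",
		Target: "/srv/data",
	}
}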
@ -1131,3 +1160,481 @@ message JobStatus {
|
||||||
// newly added nodes executing long-forgotten jobs.
|
// newly added nodes executing long-forgotten jobs.
|
||||||
google.protobuf.Timestamp last_execution = 2;
|
google.protobuf.Timestamp last_execution = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// VolumeAccessMode is the access mode of the volume, and is used to determine
|
||||||
|
// the CSI AccessMode value, as well as the volume access type (block vs
|
||||||
|
// mount). In this way, it is more similar to the CSI VolumeCapability message.
|
||||||
|
//
|
||||||
|
// This defines how and where a volume can be accessed by more than
|
||||||
|
// one Task, but does not imply anything about the accessible topology of the
|
||||||
|
// volume.
|
||||||
|
//
|
||||||
|
// For analogy, a flash drive can be used on many computers, but only one of
|
||||||
|
// them at a time, and so would have a scope of "Single". But, it can be used
|
||||||
|
// by any number of programs simultaneously, so would have a sharing of "All".
|
||||||
|
message VolumeAccessMode {
|
||||||
|
// Scope enumerates the possible volume access scopes.
|
||||||
|
enum Scope {
|
||||||
|
option (gogoproto.goproto_enum_prefix) = false;
|
||||||
|
// VolumeScopeSingleNode indicates that only one node at a time may have
|
||||||
|
// access to the volume.
|
||||||
|
SINGLE_NODE = 0 [(gogoproto.enumvalue_customname) = "VolumeScopeSingleNode"];
|
||||||
|
// VolumeScopeMultiNode indicates that multiple nodes may access the volume
|
||||||
|
// at the same time.
|
||||||
|
MULTI_NODE = 1 [(gogoproto.enumvalue_customname) = "VolumeScopeMultiNode"];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sharing enumerates the possible volume sharing modes.
|
||||||
|
enum Sharing {
|
||||||
|
option (gogoproto.goproto_enum_prefix) = false;
|
||||||
|
// VolumeSharingNone indicates that the volume may only be used by a single
|
||||||
|
// Task at any given time.
|
||||||
|
NONE = 0 [(gogoproto.enumvalue_customname) = "VolumeSharingNone"];
|
||||||
|
// VolumeSharingReadOnly indicates that the volume may be accessed by
|
||||||
|
// multiple Tasks, but all Tasks only have have read access.
|
||||||
|
READ_ONLY = 1 [(gogoproto.enumvalue_customname) = "VolumeSharingReadOnly"];
|
||||||
|
// VolumeSharingOneWriter indicates that the Volume may be accessed by
|
||||||
|
// multiple Tasks, but only the one Task may have write permission for the
|
||||||
|
// Volume.
|
||||||
|
ONE_WRITER = 2 [(gogoproto.enumvalue_customname) = "VolumeSharingOneWriter"];
|
||||||
|
// VolumeSharingAll indicates that any number of Tasks may have read and
|
||||||
|
// write access to the volume.
|
||||||
|
ALL = 3 [(gogoproto.enumvalue_customname) = "VolumeSharingAll"];
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockVolume indicates the volume will be accessed with the block device
|
||||||
|
// API.
|
||||||
|
message BlockVolume {
|
||||||
|
// intentionally empty
|
||||||
|
}
|
||||||
|
|
||||||
|
// MountVolume indicates the volume will be access with the filesystem API.
|
||||||
|
message MountVolume {
|
||||||
|
// FsType is the filesystem type. This field is optional, and an empty
|
||||||
|
// string is equal to an unspecified value.
|
||||||
|
string fs_type = 1;
|
||||||
|
|
||||||
|
// MountFlags indicates mount options to be used for the volume. This
|
||||||
|
// field is optional, and may contain sensitive data.
|
||||||
|
repeated string mount_flags = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scope defines on how many nodes this volume can be accessed
|
||||||
|
// simultaneously. If unset, will default to the zero-value of SINGLE_NODE.
|
||||||
|
Scope scope = 1;
|
||||||
|
|
||||||
|
// Sharing defines how many tasks can use this volume at the same time, and
|
||||||
|
// in what way. If unset, will default to the zero-value of NONE.
|
||||||
|
Sharing sharing = 2;
|
||||||
|
|
||||||
|
// AccessType defines the access type of the volume. Unlike Sharing and
|
||||||
|
// Scope, Swarmkit itself doesn't define either of these as a default, but
|
||||||
|
// but the upstream is free to do so. However, one of these MUST be set.
|
||||||
|
oneof access_type {
|
||||||
|
BlockVolume block = 3;
|
||||||
|
MountVolume mount = 4;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
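To make the Scope and Sharing split concrete, here is a rough sketch of how a scheduler-style check might read the Sharing mode (invented for this note; it assumes the generated package is imported as api and that the Go constant names follow the gogoproto customnames above; the task/writer counting is deliberately simplified):

	// sharingAllowsAnotherTask reports whether one more task may attach to a
	// volume, given how many tasks already use it, how many of those write,
	// and whether the new task wants write access.
	func sharingAllowsAnotherTask(sharing api.VolumeAccessMode_Sharing, tasks, writers int, newWritable bool) bool {
		switch sharing {
		case api.VolumeSharingNone:
			return tasks == 0 // at most one task may ever use the volume
		case api.VolumeSharingReadOnly:
			return !newWritable && writers == 0 // readers only
		case api.VolumeSharingOneWriter:
			return !newWritable || writers == 0 // any readers, a single writer
		case api.VolumeSharingAll:
			return true
		default:
			return false
		}
	}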
+// VolumeSecret indicates a secret value that must be passed to CSI plugin
+// operations.
+message VolumeSecret {
+	// Key represents the key that will be passed as a controller secret to the
+	// CSI plugin.
+	string key = 1;
+
+	// Secret represents the swarmkit Secret object from which to read data to
+	// use as the value to pass to the CSI plugin. This can be either a secret
+	// name or ID.
+	//
+	// TODO(dperny): should this be a SecretReference instead?
+	string secret = 2;
+}
+
+// VolumePublishStatus contains information about the volume's publishing to a
+// specific node.
+//
+// Publishing or unpublishing a volume to a node is a two-step process.
+//
+// When a Volume is needed on a Node, a VolumePublishStatus with state
+// PendingPublish is added. This indicates that the volume should be published,
+// but the RPCs have not been executed.
+//
+// Then, afterward, ControllerPublishVolume is called for the Volume, and the
+// State is changed to Published, indicating that the call was a success.
+//
+// When a Volume is no longer needed, the process is similar, with the State
+// being changed to PendingUnpublish. When ControllerUnpublishVolume succeeds,
+// the PublishStatus for that Node is simply removed.
+//
+// Without this two-step process, the following could happen:
+//
+//  1. ControllerPublishVolume is called and the Volume is successfully
+//     published.
+//  2. A crash or leadership change disrupts the cluster before
+//     the Volume with the updated VolumePublishStatus can be added to the
+//     store.
+//  3. The Task that required the Volume to be published is deleted.
+//
+// In this case, the Volume would be published to the Node, but Swarm would be
+// unaware of this, and would additionally be unaware that the Volume _should_
+// be published to the Node.
+//
+// By first committing our intention to publish a Volume, we guarantee that the
+// Volume itself is sufficient to know which Nodes it may have been published
+// to.
+message VolumePublishStatus {
+	// State is the state of the volume in the publish/unpublish
+	// lifecycle, on a particular node.
+	enum State {
+		// PendingPublish indicates that the volume should be published on this
+		// node, but the call to ControllerPublishVolume has not been
+		// successfully completed yet and the result recorded by swarmkit.
+		PENDING_PUBLISH = 0;
+
+		// Published means the volume is published successfully to the node.
+		PUBLISHED = 1;
+
+		// PendingNodeUnpublish indicates that the Volume should be unpublished
+		// on the Node, and we're waiting for confirmation that it has done so.
+		// After the Node has confirmed that the Volume has been unpublished,
+		// the state will move to PendingUnpublish.
+		PENDING_NODE_UNPUBLISH = 2;
+
+		// PendingUnpublish means the volume is published to the node, and
+		// needs to not be, but the call to ControllerUnpublishVolume has not
+		// verifiably succeeded yet. There is no Unpublished state, because
+		// after the volume has been verifiably unpublished, the
+		// VolumePublishStatus for the node is removed.
+		PENDING_UNPUBLISH = 3;
+	}
+
+	// NodeID is the swarm (not CSI plugin) node ID that this volume is
+	// published to.
+	string node_id = 1;
+
+	// State is the publish state of the volume.
+	State state = 2;
+
+	// PublishContext is the same PublishContext returned by a call to
+	// ControllerPublishVolume.
+	map<string, string> publish_context = 3;
+
+	// Message is a human-readable message explaining the state of the volume.
+	// It exists to convey the current situation with the volume to the user,
+	// allowing, for example, the user to see error messages explaining why a
+	// volume might not be published yet.
+	string message = 5;
+}
+
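The two-step flow documented above, reduced to a sketch (hypothetical function and callback, not swarmkit code; it assumes the generated package is imported as api):

	// reconcilePublish performs "step two" of publishing: the intent
	// (PENDING_PUBLISH) is already durably in the store, so the RPC can be
	// retried safely after a crash or a leadership change.
	func reconcilePublish(s *api.VolumePublishStatus, controllerPublish func() (map[string]string, error)) error {
		if s.State != api.VolumePublishStatus_PENDING_PUBLISH {
			return nil // nothing to do for this node
		}
		publishContext, err := controllerPublish() // wraps ControllerPublishVolume
		if err != nil {
			return err // stay in PENDING_PUBLISH and retry later
		}
		s.PublishContext = publishContext
		s.State = api.VolumePublishStatus_PUBLISHED
		return nil
	}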
+// VolumeInfo contains information about the volume originating from the CSI
+// plugin.
+message VolumeInfo {
+	// CapacityBytes is the capacity of this volume in bytes. A value of 0
+	// indicates that the capacity is unknown.
+	int64 capacity_bytes = 1;
+
+	// VolumeContext includes fields that are opaque to Swarmkit.
+	map<string, string> volume_context = 2;
+
+	// VolumeID is the ID of the volume as reported by the CSI plugin.
+	// Information about the volume is not cached in swarmkit's object store;
+	// instead, it is retrieved on-demand as needed. If the VolumeID field is an
+	// empty string, and the plugin advertises CREATE_DELETE_VOLUME capability,
+	// then Swarmkit has not yet called CreateVolume.
+	string volume_id = 3;
+
+	// AccessibleTopology is the topology this volume is actually accessible
+	// from.
+	repeated Topology accessible_topology = 4;
+}
+
+// CapacityRange describes the minimum and maximum capacity a volume should be
+// created with.
+message CapacityRange {
+	// RequiredBytes specifies that a volume must be at least this big. The value
+	// of 0 indicates an unspecified minimum. Must not be negative.
+	int64 required_bytes = 1;
+
+	// LimitBytes specifies that a volume must not be bigger than this. The value
+	// of 0 indicates an unspecified maximum. Must not be negative.
+	int64 limit_bytes = 2;
+}
+
+// VolumeAssignment contains the information needed by a Node to use a CSI
+// volume. This includes the information needed to Stage and Publish the volume
+// on the node, but never the full Volume object.
+message VolumeAssignment {
+	// ID is the swarmkit ID for the volume. This is used by swarmkit components
+	// to identify the volume.
+	string id = 1;
+
+	// VolumeID is the CSI volume ID as returned from CreateVolume. This is used
+	// by the CSI driver to identify the volume.
+	string volume_id = 2;
+
+	// Driver is the CSI Driver that this volume is managed by.
+	Driver driver = 3;
+
+	// VolumeContext is a map returned from the CSI Controller service when a
+	// Volume is created. It is optional for the driver to provide, but if it is
+	// provided, it must be passed to subsequent calls.
+	map<string,string> volume_context = 4;
+
+	// PublishContext is a map returned from the Controller service when
+	// ControllerPublishVolume is called. Again, it is optional, but if provided,
+	// must be passed.
+	map<string,string> publish_context = 5;
+
+	// AccessMode specifies the access mode of the volume.
+	VolumeAccessMode access_mode = 6;
+
+	// Secrets is the set of secrets required by the CSI plugin. These refer to
+	// swarmkit Secrets that will be distributed separately to the node.
+	repeated VolumeSecret secrets = 7;
+}
+
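As an illustration of where these fields flow, a node agent would feed a VolumeAssignment into the CSI node RPCs roughly like this. A sketch only: the csi package is assumed to be the CSI v1 Go bindings, the staging path layout and the Go-side field names are assumptions, and required arguments such as the volume capability are omitted for brevity:

	func stageVolume(ctx context.Context, node csi.NodeClient, a *api.VolumeAssignment) error {
		_, err := node.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{
			VolumeId:          a.VolumeID,       // CSI volume ID, not the swarmkit ID
			PublishContext:    a.PublishContext, // as returned by ControllerPublishVolume
			VolumeContext:     a.VolumeContext,  // opaque map from CreateVolume
			StagingTargetPath: "/var/lib/docker/csi/" + a.ID, // hypothetical layout
		})
		return err
	}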
+// VolumeAttachment is the information associating a Volume with a Task.
+message VolumeAttachment {
+	// ID is the swarmkit ID of the volume assigned to this task, not the CSI
+	// volume ID.
+	string id = 1;
+
+	// Source indicates the Mount source that this volume is assigned for.
+	string source = 2;
+
+	// Target indicates the Mount target that this volume is assigned for.
+	string target = 3;
+}
+
+
+// These types are copied from the CSI spec. They are copied because there is
+// difficulty in compatibility between the CSI protos and the swarmkit protos,
+// and straight importing them is difficult.
+
+// TopologyRequirement expresses the user's requirements for a volume's
+// accessible topology.
+message TopologyRequirement {
+	// Specifies the list of topologies the provisioned volume MUST be
+	// accessible from.
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// If requisite is specified, the provisioned volume MUST be
+	// accessible from at least one of the requisite topologies.
+	//
+	// Given
+	//   x = number of topologies provisioned volume is accessible from
+	//   n = number of requisite topologies
+	// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+	// If x==n, then the SP MUST make the provisioned volume available to
+	// all topologies from the list of requisite topologies. If it is
+	// unable to do so, the SP MUST fail the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and both "zone" "Z2" and "zone" "Z3".
+	//
+	// If x<n, then the SP SHALL choose x unique topologies from the list
+	// of requisite topologies. If it is unable to do so, the SP MUST fail
+	// the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP may choose to make the provisioned volume available in
+	// either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"}
+	// then the provisioned volume MUST be accessible from any combination
+	// of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
+	// "R1/Z4", or "R1/Z3" and "R1/Z4".
+	//
+	// If x>n, then the SP MUST make the provisioned volume available from
+	// all topologies from the list of requisite topologies and MAY choose
+	// the remaining x-n unique topologies from the list of all possible
+	// topologies. If it is unable to do so, the SP MUST fail the
+	// CreateVolume call.
+	// For example, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2" and the SP may select the second zone
+	// independently, e.g. "R1/Z4".
+	repeated Topology requisite = 1;
+
+	// Specifies the list of topologies the CO would prefer the volume to
+	// be provisioned in.
+	//
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// An SP MUST attempt to make the provisioned volume available using
+	// the preferred topologies in order from first to last.
+	//
+	// If requisite is specified, all topologies in preferred list MUST
+	// also be present in the list of requisite topologies.
+	//
+	// If the SP is unable to make the provisioned volume available
+	// from any of the preferred topologies, the SP MAY choose a topology
+	// from the list of requisite topologies.
+	// If the list of requisite topologies is not specified, then the SP
+	// MAY choose from the list of all possible topologies.
+	// If the list of requisite topologies is specified and the SP is
+	// unable to make the provisioned volume available from any of the
+	// requisite topologies it MUST fail the CreateVolume call.
+	//
+	// Example 1:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// available from "zone" "Z3" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
+	//
+	// Example 2:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z2"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// accessible from "zone" "Z4" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible. If that
+	// is not possible, the SP may choose between either the "zone"
+	// "Z3" or "Z5" in the "region" "R1".
+	//
+	// Example 3:
+	// Given a volume should be accessible from TWO zones (because an
+	// opaque parameter in CreateVolumeRequest, for example, specifies
+	// the volume is accessible from two zones, aka synchronously
+	// replicated), and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z5"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// accessible from the combination of the two "zones" "Z5" and "Z3" in
+	// the "region" "R1". If that's not possible, it should fall back to
+	// a combination of "Z5" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back to a
+	// combination of "Z3" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back to a
+	// combination of other possibilities from the list of requisite.
+	repeated Topology preferred = 2;
+}
+
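The requisite rule above ("accessible from at least one requisite topology") can be stated compactly in Go. This is an invented helper for this note, not part of the CSI bindings or swarmkit; it assumes the generated package is imported as api:

	// satisfiesRequisite reports whether a volume's topology matches at least
	// one entry of the requisite list. A requisite entry matches when every
	// one of its domain/segment pairs is present in the volume's topology.
	func satisfiesRequisite(volume *api.Topology, requisite []*api.Topology) bool {
		if len(requisite) == 0 {
			return true // no requirement expressed
		}
		for _, req := range requisite {
			matched := true
			for domain, segment := range req.Segments {
				if volume.Segments[domain] != segment {
					matched = false
					break
				}
			}
			if matched {
				return true
			}
		}
		return false
	}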
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
+message Topology {
+	map<string, string> segments = 1;
+}
+
+// VolumeCapability specifies a capability of a volume.
+message VolumeCapability {
+	// Indicate that the volume will be accessed via the block device API.
+	message BlockVolume {
+		// Intentionally empty, for now.
+	}
+
+	// Indicate that the volume will be accessed via the filesystem API.
+	message MountVolume {
+		// The filesystem type. This field is OPTIONAL.
+		// An empty string is equal to an unspecified field value.
+		string fs_type = 1;
+
+		// The mount options that can be used for the volume. This field is
+		// OPTIONAL. `mount_flags` MAY contain sensitive information.
+		// Therefore, the CO and the Plugin MUST NOT leak this information
+		// to untrusted entities. The total size of this repeated field
+		// SHALL NOT exceed 4 KiB.
+		repeated string mount_flags = 2;
+	}
+
+	// Specify how a volume can be accessed.
+	message AccessMode {
+		enum Mode {
+			UNKNOWN = 0;
+
+			// Can only be published once as read/write on a single node, at
+			// any given time.
+			SINGLE_NODE_WRITER = 1;
+
+			// Can only be published once as readonly on a single node, at
+			// any given time.
+			SINGLE_NODE_READER_ONLY = 2;
+
+			// Can be published as readonly at multiple nodes simultaneously.
+			MULTI_NODE_READER_ONLY = 3;
+
+			// Can be published at multiple nodes simultaneously. Only one of
+			// the nodes can be used as read/write. The rest will be readonly.
+			MULTI_NODE_SINGLE_WRITER = 4;
+
+			// Can be published as read/write at multiple nodes
+			// simultaneously.
+			MULTI_NODE_MULTI_WRITER = 5;
+		}
+
+		// This field is REQUIRED.
+		Mode mode = 1;
+	}
+
+	// Specifies what API the volume will be accessed using. One of the
+	// following fields MUST be specified.
+	oneof access_type {
+		BlockVolume block = 1;
+		MountVolume mount = 2;
+	}
+
+	// This is a REQUIRED field.
+	AccessMode access_mode = 3;
+}
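Putting the two representations side by side: swarmkit's Scope/Sharing pair (earlier in this file) collapses into the CSI-style AccessMode.Mode above. A plausible mapping, sketched here with invented code (swarmkit's actual conversion logic may differ in edge cases; api is the generated package):

	func csiAccessMode(scope api.VolumeAccessMode_Scope, sharing api.VolumeAccessMode_Sharing) api.VolumeCapability_AccessMode_Mode {
		readOnly := sharing == api.VolumeSharingReadOnly
		switch {
		case scope == api.VolumeScopeSingleNode && readOnly:
			return api.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
		case scope == api.VolumeScopeSingleNode:
			return api.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
		case readOnly:
			return api.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
		case sharing == api.VolumeSharingOneWriter:
			return api.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER
		default:
			return api.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
		}
	}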
@@ -80,6 +80,7 @@ type Object struct {
 	//	*Object_Resource
 	//	*Object_Extension
 	//	*Object_Config
+	//	*Object_Volume
 	Object isObject_Object `protobuf_oneof:"Object"`
 }

@@ -148,6 +149,9 @@ type Object_Extension struct {
 type Object_Config struct {
 	Config *Config `protobuf:"bytes,9,opt,name=config,proto3,oneof" json:"config,omitempty"`
 }
+type Object_Volume struct {
+	Volume *Volume `protobuf:"bytes,10,opt,name=volume,proto3,oneof" json:"volume,omitempty"`
+}

 func (*Object_Node) isObject_Object()    {}
 func (*Object_Service) isObject_Object() {}
@@ -158,6 +162,7 @@ func (*Object_Secret) isObject_Object() {}
 func (*Object_Resource) isObject_Object()  {}
 func (*Object_Extension) isObject_Object() {}
 func (*Object_Config) isObject_Object()    {}
+func (*Object_Volume) isObject_Object()    {}

 func (m *Object) GetObject() isObject_Object {
 	if m != nil {
@@ -229,6 +234,13 @@ func (m *Object) GetConfig() *Config {
 	return nil
 }

+func (m *Object) GetVolume() *Volume {
+	if x, ok := m.GetObject().(*Object_Volume); ok {
+		return x.Volume
+	}
+	return nil
+}
+
 // XXX_OneofWrappers is for the internal use of the proto package.
 func (*Object) XXX_OneofWrappers() []interface{} {
 	return []interface{}{
@@ -241,6 +253,7 @@ func (*Object) XXX_OneofWrappers() []interface{} {
 		(*Object_Resource)(nil),
 		(*Object_Extension)(nil),
 		(*Object_Config)(nil),
+		(*Object_Volume)(nil),
 	}
 }

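With the oneof case in place, a watch client can pick volume events out of the stream. A sketch (the event plumbing around it is simplified and partly assumed; only GetVolume and the Volume type come from the generated code in this diff):

	func handleVolumeEvents(events []*api.WatchMessage_Event) {
		for _, ev := range events {
			// GetVolume returns nil unless the oneof currently holds an
			// *Object_Volume, so other object kinds are skipped safely.
			if v := ev.Object.GetVolume(); v != nil {
				fmt.Printf("volume %s changed\n", v.ID)
			}
		}
	}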
@@ -789,82 +802,83 @@ func init() {
 }

 var fileDescriptor_da25266013800cd9 = []byte{
-	// 1199 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbd, 0x73, 0x1b, 0xc5,
-	0x1b, 0xc7, 0xef, 0x14, 0xf9, 0x24, 0x3d, 0xb6, 0x13, 0xcf, 0xc6, 0x49, 0xee, 0xa7, 0x5f, 0x90,
-	0x85, 0x78, 0xcb, 0x24, 0x41, 0x06, 0x13, 0x92, 0x01, 0x02, 0x33, 0x96, 0x2c, 0x46, 0x22, 0xe3,
-	0x97, 0x59, 0xdb, 0x49, 0xa9, 0x39, 0xdf, 0x3d, 0x56, 0x0e, 0xdf, 0xdd, 0x8a, 0xbd, 0x93, 0x1d,
-	0x77, 0x14, 0x14, 0x4c, 0x2a, 0x1a, 0x66, 0x68, 0x52, 0x41, 0x4d, 0x43, 0x07, 0xff, 0x40, 0x86,
-	0x2a, 0x65, 0x68, 0x3c, 0x44, 0xe9, 0x28, 0xf8, 0x0b, 0x28, 0x98, 0x7d, 0x39, 0xdb, 0x51, 0x4e,
-	0x36, 0xa9, 0xb4, 0xb7, 0xf7, 0xf9, 0x3e, 0xfb, 0xec, 0xf3, 0x76, 0x82, 0xab, 0x3d, 0x3f, 0xb9,
-	0x3f, 0xd8, 0xaa, 0xbb, 0x2c, 0x9c, 0xf7, 0x98, 0xbb, 0x83, 0x7c, 0x3e, 0xde, 0x73, 0x78, 0xb8,
-	0xe3, 0x27, 0xf3, 0x4e, 0xdf, 0x9f, 0xdf, 0x73, 0x12, 0xf7, 0x7e, 0xbd, 0xcf, 0x59, 0xc2, 0x08,
-	0x51, 0x40, 0x3d, 0x05, 0xea, 0xbb, 0xef, 0x97, 0x4f, 0xd3, 0xc7, 0x7d, 0x74, 0x63, 0xa5, 0x2f,
-	0x5f, 0x3f, 0x85, 0x65, 0x5b, 0x5f, 0xa2, 0x9b, 0xa4, 0xf4, 0x69, 0x96, 0x93, 0xfd, 0x3e, 0xa6,
-	0xec, 0x6c, 0x8f, 0xf5, 0x98, 0x5c, 0xce, 0x8b, 0x95, 0xde, 0xbd, 0x75, 0x82, 0x05, 0x49, 0x6c,
-	0x0d, 0xb6, 0xe7, 0xfb, 0xc1, 0xa0, 0xe7, 0x47, 0xfa, 0x47, 0x09, 0x6b, 0xdf, 0xe4, 0xc1, 0x5a,
-	0x95, 0xce, 0x90, 0x3a, 0xe4, 0x23, 0xe6, 0xa1, 0x6d, 0x56, 0xcd, 0x2b, 0x93, 0x0b, 0x76, 0xfd,
-	0xe5, 0x10, 0xd4, 0x57, 0x98, 0x87, 0x6d, 0x83, 0x4a, 0x8e, 0xdc, 0x82, 0x42, 0x8c, 0x7c, 0xd7,
-	0x77, 0xd1, 0xce, 0x49, 0xc9, 0xff, 0xb3, 0x24, 0xeb, 0x0a, 0x69, 0x1b, 0x34, 0xa5, 0x85, 0x30,
-	0xc2, 0x64, 0x8f, 0xf1, 0x1d, 0xfb, 0xcc, 0x78, 0xe1, 0x8a, 0x42, 0x84, 0x50, 0xd3, 0xc2, 0xc3,
-	0xc4, 0x89, 0x77, 0xec, 0xfc, 0x78, 0x0f, 0x37, 0x9c, 0x58, 0x48, 0x24, 0x27, 0x0e, 0x72, 0x83,
-	0x41, 0x9c, 0x20, 0xb7, 0x27, 0xc6, 0x1f, 0xd4, 0x54, 0x88, 0x38, 0x48, 0xd3, 0xe4, 0x06, 0x58,
-	0x31, 0xba, 0x1c, 0x13, 0xdb, 0x92, 0xba, 0x72, 0xf6, 0xcd, 0x04, 0xd1, 0x36, 0xa8, 0x66, 0xc9,
-	0xc7, 0x50, 0xe4, 0x18, 0xb3, 0x01, 0x77, 0xd1, 0x2e, 0x48, 0xdd, 0xe5, 0x2c, 0x1d, 0xd5, 0x4c,
-	0xdb, 0xa0, 0x87, 0x3c, 0xf9, 0x14, 0x4a, 0xf8, 0x20, 0xc1, 0x28, 0xf6, 0x59, 0x64, 0x17, 0xa5,
-	0xf8, 0xb5, 0x2c, 0x71, 0x2b, 0x85, 0xda, 0x06, 0x3d, 0x52, 0x08, 0x87, 0x5d, 0x16, 0x6d, 0xfb,
-	0x3d, 0xbb, 0x34, 0xde, 0xe1, 0xa6, 0x24, 0x84, 0xc3, 0x8a, 0x6d, 0x14, 0xd3, 0xdc, 0xd7, 0xd6,
-	0x60, 0x6a, 0x1d, 0x03, 0x74, 0x93, 0xc6, 0xfe, 0x7a, 0xc0, 0x12, 0x72, 0x1d, 0x40, 0x67, 0xab,
-	0xeb, 0x7b, 0xb2, 0x22, 0x4a, 0x8d, 0xe9, 0xe1, 0xc1, 0x5c, 0x49, 0xa7, 0xb3, 0xb3, 0x44, 0x4b,
-	0x1a, 0xe8, 0x78, 0x84, 0x40, 0x3e, 0x0e, 0x58, 0x22, 0xcb, 0x20, 0x4f, 0xe5, 0xba, 0xb6, 0x06,
-	0x67, 0x53, 0x8b, 0xcd, 0x41, 0x9c, 0xb0, 0x50, 0x50, 0x3b, 0x7e, 0xa4, 0xad, 0x51, 0xb9, 0x26,
-	0xb3, 0x30, 0xe1, 0x47, 0x1e, 0x3e, 0x90, 0xd2, 0x12, 0x55, 0x0f, 0x62, 0x77, 0xd7, 0x09, 0x06,
-	0x28, 0xcb, 0xa3, 0x44, 0xd5, 0x43, 0xed, 0x2f, 0x0b, 0x8a, 0xa9, 0x49, 0x62, 0x43, 0xee, 0xd0,
-	0x31, 0x6b, 0x78, 0x30, 0x97, 0xeb, 0x2c, 0xb5, 0x0d, 0x9a, 0xf3, 0x3d, 0x72, 0x0d, 0x4a, 0xbe,
-	0xd7, 0xed, 0x73, 0xdc, 0xf6, 0xb5, 0xd9, 0xc6, 0xd4, 0xf0, 0x60, 0xae, 0xd8, 0x59, 0x5a, 0x93,
-	0x7b, 0x22, 0xec, 0xbe, 0xa7, 0xd6, 0x64, 0x16, 0xf2, 0x91, 0x13, 0xea, 0x83, 0x64, 0x65, 0x3b,
-	0x21, 0x92, 0xd7, 0x61, 0x52, 0xfc, 0xa6, 0x46, 0xf2, 0xfa, 0x25, 0x88, 0x4d, 0x2d, 0xbc, 0x0d,
-	0x96, 0x2b, 0xaf, 0xa5, 0x2b, 0xab, 0x96, 0x5d, 0x21, 0xc7, 0x03, 0x20, 0x03, 0xaf, 0x42, 0xd1,
-	0x81, 0x69, 0xb5, 0x4a, 0x8f, 0xb0, 0x5e, 0xc1, 0xc8, 0x94, 0x92, 0x6a, 0x47, 0xea, 0x2f, 0x64,
-	0xaa, 0x90, 0x91, 0x29, 0x51, 0x29, 0x47, 0xb9, 0x7a, 0x0b, 0x0a, 0xa2, 0x7b, 0x05, 0x5c, 0x94,
-	0x30, 0x0c, 0x0f, 0xe6, 0x2c, 0xd1, 0xd8, 0x92, 0xb4, 0xc4, 0xcb, 0x8e, 0x47, 0x6e, 0xea, 0x94,
-	0xaa, 0x72, 0xaa, 0x9e, 0xe4, 0x98, 0x28, 0x18, 0x11, 0x3a, 0xc1, 0x93, 0x25, 0x98, 0xf6, 0x30,
-	0xf6, 0x39, 0x7a, 0xdd, 0x38, 0x71, 0x12, 0xb4, 0xa1, 0x6a, 0x5e, 0x39, 0x9b, 0x5d, 0xcb, 0xa2,
-	0x57, 0xd7, 0x05, 0x24, 0x2e, 0xa5, 0x55, 0xf2, 0x99, 0x2c, 0x40, 0x9e, 0xb3, 0x00, 0xed, 0x49,
-	0x29, 0xbe, 0x3c, 0x6e, 0x14, 0x51, 0x16, 0xc8, 0x71, 0x24, 0x58, 0xd2, 0x01, 0x08, 0x31, 0xdc,
-	0x42, 0x1e, 0xdf, 0xf7, 0xfb, 0xf6, 0x94, 0x54, 0xbe, 0x33, 0x4e, 0xb9, 0xde, 0x47, 0xb7, 0xbe,
-	0x7c, 0x88, 0x8b, 0xe4, 0x1e, 0x89, 0xc9, 0x32, 0x5c, 0xe0, 0xb8, 0x8d, 0x1c, 0x23, 0x17, 0xbd,
-	0xae, 0x9e, 0x3e, 0x22, 0x62, 0xd3, 0x32, 0x62, 0x97, 0x86, 0x07, 0x73, 0xe7, 0xe9, 0x21, 0xa0,
-	0x07, 0x95, 0x0c, 0xdf, 0x79, 0xfe, 0xd2, 0xb6, 0x47, 0xbe, 0x80, 0xd9, 0x63, 0xe6, 0xd4, 0xb0,
-	0x10, 0xd6, 0xce, 0x4a, 0x6b, 0x17, 0x87, 0x07, 0x73, 0xe4, 0xc8, 0x9a, 0x9a, 0x2a, 0xd2, 0x18,
-	0xe1, 0xa3, 0xbb, 0xa3, 0xb6, 0x54, 0x1f, 0x0b, 0x5b, 0x33, 0x59, 0xb6, 0x54, 0xc3, 0x8f, 0xda,
-	0xd2, 0xbb, 0xa2, 0xf9, 0x54, 0x43, 0x9e, 0x4b, 0x8b, 0x5f, 0x3c, 0x35, 0xf2, 0x90, 0x6b, 0xec,
-	0xd7, 0xfe, 0xc8, 0xc1, 0xd4, 0x3d, 0xf1, 0x41, 0xa4, 0xf8, 0xd5, 0x00, 0xe3, 0x84, 0xb4, 0xa0,
-	0x80, 0x51, 0xc2, 0x7d, 0x8c, 0x6d, 0xb3, 0x7a, 0xe6, 0xca, 0xe4, 0xc2, 0xb5, 0xac, 0xd8, 0x1e,
-	0x97, 0xa8, 0x87, 0x56, 0x94, 0xf0, 0x7d, 0x9a, 0x6a, 0xc9, 0x6d, 0x98, 0xe4, 0x18, 0x0f, 0x42,
-	0xec, 0x6e, 0x73, 0x16, 0x9e, 0xf4, 0xe1, 0xb8, 0x8b, 0x5c, 0x8c, 0x36, 0x0a, 0x8a, 0xff, 0x9c,
-	0xb3, 0x90, 0x5c, 0x07, 0xe2, 0x47, 0x6e, 0x30, 0xf0, 0xb0, 0xcb, 0x02, 0xaf, 0xab, 0xbe, 0xa2,
-	0xb2, 0x79, 0x8b, 0x74, 0x46, 0xbf, 0x59, 0x0d, 0x3c, 0x35, 0xd4, 0xca, 0xdf, 0x9b, 0x00, 0x47,
-	0x3e, 0x64, 0xce, 0x9f, 0x4f, 0xc0, 0x72, 0xdc, 0x44, 0xcc, 0xdc, 0x9c, 0x2c, 0x98, 0x37, 0xc6,
-	0x5e, 0x6a, 0x51, 0x62, 0x77, 0xfc, 0xc8, 0xa3, 0x5a, 0x42, 0x6e, 0x42, 0x61, 0xdb, 0x0f, 0x12,
-	0xe4, 0xb1, 0x7d, 0x46, 0x86, 0xe4, 0xf2, 0x49, 0x6d, 0x42, 0x53, 0xb8, 0xf6, 0x5b, 0x1a, 0xdb,
-	0x65, 0x8c, 0x63, 0xa7, 0x87, 0xe4, 0x33, 0xb0, 0x70, 0x17, 0xa3, 0x24, 0x0d, 0xed, 0xdb, 0x63,
-	0xbd, 0xd0, 0x8a, 0x7a, 0x4b, 0xe0, 0x54, 0xab, 0xc8, 0x87, 0x50, 0xd8, 0x55, 0xd1, 0xfa, 0x2f,
-	0x01, 0x4d, 0xd9, 0xf2, 0x2f, 0x26, 0x4c, 0x48, 0x43, 0xc7, 0xc2, 0x60, 0xbe, 0x7a, 0x18, 0x16,
-	0xc0, 0xd2, 0x89, 0xc8, 0x8d, 0xff, 0xf6, 0xa8, 0x94, 0x50, 0x4d, 0x92, 0x8f, 0x00, 0x46, 0x12,
-	0x78, 0xb2, 0xae, 0xc4, 0xd2, 0xac, 0x5e, 0xfd, 0xc7, 0x84, 0x73, 0x23, 0xae, 0x90, 0x1b, 0x30,
-	0x7b, 0x6f, 0x71, 0xa3, 0xd9, 0xee, 0x2e, 0x36, 0x37, 0x3a, 0xab, 0x2b, 0xdd, 0xcd, 0x95, 0x3b,
-	0x2b, 0xab, 0xf7, 0x56, 0x66, 0x8c, 0x72, 0xf9, 0xe1, 0xa3, 0xea, 0xc5, 0x11, 0x7c, 0x33, 0xda,
-	0x89, 0xd8, 0x9e, 0x70, 0xfc, 0xfc, 0x0b, 0xaa, 0x26, 0x6d, 0x2d, 0x6e, 0xb4, 0x66, 0xcc, 0xf2,
-	0xff, 0x1e, 0x3e, 0xaa, 0x5e, 0x18, 0x11, 0x35, 0x39, 0xaa, 0xc9, 0xf4, 0xa2, 0x66, 0x73, 0x6d,
-	0x49, 0x68, 0x72, 0x99, 0x9a, 0xcd, 0xbe, 0x97, 0xa5, 0xa1, 0xad, 0xe5, 0xd5, 0xbb, 0xad, 0x99,
-	0x7c, 0xa6, 0x86, 0x62, 0xc8, 0x76, 0xb1, 0x7c, 0xe9, 0xdb, 0x1f, 0x2b, 0xc6, 0xaf, 0x3f, 0x55,
-	0x46, 0xaf, 0xba, 0x10, 0xc2, 0x84, 0xdc, 0x22, 0x5e, 0xba, 0xa8, 0x9e, 0xd6, 0x88, 0xe5, 0xea,
-	0x69, 0xf5, 0x54, 0xbb, 0xf0, 0xfb, 0xcf, 0x7f, 0xff, 0x90, 0x3b, 0x07, 0xd3, 0x92, 0x78, 0x37,
-	0x74, 0x22, 0xa7, 0x87, 0xfc, 0x3d, 0xb3, 0xf1, 0xe6, 0xe3, 0x67, 0x15, 0xe3, 0xe9, 0xb3, 0x8a,
-	0xf1, 0xf5, 0xb0, 0x62, 0x3e, 0x1e, 0x56, 0xcc, 0x27, 0xc3, 0x8a, 0xf9, 0xe7, 0xb0, 0x62, 0x7e,
-	0xf7, 0xbc, 0x62, 0x3c, 0x79, 0x5e, 0x31, 0x9e, 0x3e, 0xaf, 0x18, 0x5b, 0x96, 0xfc, 0x33, 0xf9,
-	0xc1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x96, 0x4e, 0x58, 0x61, 0x63, 0x0b, 0x00, 0x00,
+	// 1210 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xcd, 0x73, 0xdb, 0xc4,
+	0x1b, 0xc7, 0x25, 0xd7, 0x51, 0xec, 0x27, 0x49, 0x9b, 0xd9, 0xa6, 0xad, 0x7e, 0xfe, 0x15, 0xc7,
+	0x98, 0xb7, 0x4e, 0x5b, 0x1c, 0x08, 0xa5, 0x1d, 0xa0, 0x30, 0x13, 0x3b, 0x66, 0x6c, 0x3a, 0x79,
+	0x99, 0x4d, 0xd2, 0x1e, 0x3d, 0x8a, 0xf4, 0xc4, 0x15, 0x91, 0xb4, 0x66, 0x25, 0x3b, 0xcd, 0x8d,
+	0x23, 0xd3, 0x13, 0x17, 0x66, 0xb8, 0xf4, 0x04, 0x67, 0x2e, 0xdc, 0xca, 0x3f, 0xd0, 0xe1, 0xd4,
+	0x63, 0xb9, 0x64, 0xa8, 0x7b, 0xe3, 0xc0, 0x5f, 0xc0, 0x81, 0xd9, 0x17, 0x25, 0xa9, 0x2b, 0x27,
+	0xf4, 0xe4, 0xd5, 0xea, 0xf3, 0x7d, 0xf6, 0xd9, 0xe7, 0x4d, 0x86, 0xab, 0x5d, 0x3f, 0xb9, 0xdf,
+	0xdf, 0xae, 0xb9, 0x2c, 0x5c, 0xf0, 0x98, 0xbb, 0x8b, 0x7c, 0x21, 0xde, 0x73, 0x78, 0xb8, 0xeb,
+	0x27, 0x0b, 0x4e, 0xcf, 0x5f, 0xd8, 0x73, 0x12, 0xf7, 0x7e, 0xad, 0xc7, 0x59, 0xc2, 0x08, 0x51,
+	0x40, 0x2d, 0x05, 0x6a, 0x83, 0x0f, 0x4b, 0xa7, 0xe9, 0xe3, 0x1e, 0xba, 0xb1, 0xd2, 0x97, 0xae,
+	0x9f, 0xc2, 0xb2, 0xed, 0xaf, 0xd1, 0x4d, 0x52, 0xfa, 0x34, 0xcb, 0xc9, 0x7e, 0x0f, 0x53, 0x76,
+	0xae, 0xcb, 0xba, 0x4c, 0x2e, 0x17, 0xc4, 0x4a, 0xef, 0xde, 0x3a, 0xc1, 0x82, 0x24, 0xb6, 0xfb,
+	0x3b, 0x0b, 0xbd, 0xa0, 0xdf, 0xf5, 0x23, 0xfd, 0xa3, 0x84, 0xd5, 0xc7, 0x79, 0xb0, 0xd6, 0xa4,
+	0x33, 0xa4, 0x06, 0xf9, 0x88, 0x79, 0x68, 0x9b, 0x15, 0xf3, 0xca, 0xd4, 0xa2, 0x5d, 0x7b, 0x35,
+	0x04, 0xb5, 0x55, 0xe6, 0x61, 0xcb, 0xa0, 0x92, 0x23, 0xb7, 0x60, 0x32, 0x46, 0x3e, 0xf0, 0x5d,
+	0xb4, 0x73, 0x52, 0xf2, 0xff, 0x2c, 0xc9, 0x86, 0x42, 0x5a, 0x06, 0x4d, 0x69, 0x21, 0x8c, 0x30,
+	0xd9, 0x63, 0x7c, 0xd7, 0x3e, 0x33, 0x5e, 0xb8, 0xaa, 0x10, 0x21, 0xd4, 0xb4, 0xf0, 0x30, 0x71,
+	0xe2, 0x5d, 0x3b, 0x3f, 0xde, 0xc3, 0x4d, 0x27, 0x16, 0x12, 0xc9, 0x89, 0x83, 0xdc, 0xa0, 0x1f,
+	0x27, 0xc8, 0xed, 0x89, 0xf1, 0x07, 0x35, 0x14, 0x22, 0x0e, 0xd2, 0x34, 0xb9, 0x01, 0x56, 0x8c,
+	0x2e, 0xc7, 0xc4, 0xb6, 0xa4, 0xae, 0x94, 0x7d, 0x33, 0x41, 0xb4, 0x0c, 0xaa, 0x59, 0xf2, 0x29,
+	0x14, 0x38, 0xc6, 0xac, 0xcf, 0x5d, 0xb4, 0x27, 0xa5, 0xee, 0x72, 0x96, 0x8e, 0x6a, 0xa6, 0x65,
+	0xd0, 0x43, 0x9e, 0x7c, 0x0e, 0x45, 0x7c, 0x90, 0x60, 0x14, 0xfb, 0x2c, 0xb2, 0x0b, 0x52, 0xfc,
+	0x46, 0x96, 0xb8, 0x99, 0x42, 0x2d, 0x83, 0x1e, 0x29, 0x84, 0xc3, 0x2e, 0x8b, 0x76, 0xfc, 0xae,
+	0x5d, 0x1c, 0xef, 0x70, 0x43, 0x12, 0xc2, 0x61, 0xc5, 0x0a, 0xd5, 0x80, 0x05, 0xfd, 0x10, 0x6d,
+	0x18, 0xaf, 0xba, 0x2b, 0x09, 0xa1, 0x52, 0x6c, 0xbd, 0x90, 0x56, 0x4c, 0x75, 0x1d, 0xa6, 0x37,
+	0x30, 0x40, 0x37, 0xa9, 0xef, 0x6f, 0x04, 0x2c, 0x21, 0xd7, 0x01, 0x74, 0x8e, 0x3b, 0xbe, 0x27,
+	0xeb, 0xa8, 0x58, 0x9f, 0x19, 0x1e, 0xcc, 0x17, 0x75, 0x11, 0xb4, 0x97, 0x69, 0x51, 0x03, 0x6d,
+	0x8f, 0x10, 0xc8, 0xc7, 0x01, 0x4b, 0x64, 0xf1, 0xe4, 0xa9, 0x5c, 0x57, 0xd7, 0xe1, 0x6c, 0x6a,
+	0xb1, 0xd1, 0x8f, 0x13, 0x16, 0x0a, 0x6a, 0xd7, 0x8f, 0xb4, 0x35, 0x2a, 0xd7, 0x64, 0x0e, 0x26,
+	0xfc, 0xc8, 0xc3, 0x07, 0x52, 0x5a, 0xa4, 0xea, 0x41, 0xec, 0x0e, 0x9c, 0xa0, 0x8f, 0xb2, 0xa8,
+	0x8a, 0x54, 0x3d, 0x54, 0xff, 0xb2, 0xa0, 0x90, 0x9a, 0x24, 0x36, 0xe4, 0x0e, 0x1d, 0xb3, 0x86,
+	0x07, 0xf3, 0xb9, 0xf6, 0x72, 0xcb, 0xa0, 0x39, 0xdf, 0x23, 0xd7, 0xa0, 0xe8, 0x7b, 0x9d, 0x1e,
+	0xc7, 0x1d, 0x5f, 0x9b, 0xad, 0x4f, 0x0f, 0x0f, 0xe6, 0x0b, 0xed, 0xe5, 0x75, 0xb9, 0x27, 0x92,
+	0xe5, 0x7b, 0x6a, 0x4d, 0xe6, 0x20, 0x1f, 0x39, 0xa1, 0x3e, 0x48, 0xf6, 0x83, 0x13, 0x22, 0x79,
+	0x13, 0xa6, 0xc4, 0x6f, 0x6a, 0x24, 0xaf, 0x5f, 0x82, 0xd8, 0xd4, 0xc2, 0xdb, 0x60, 0xb9, 0xf2,
+	0x5a, 0xba, 0x1e, 0xab, 0xd9, 0x75, 0x75, 0x3c, 0x00, 0x32, 0x5d, 0x2a, 0x14, 0x6d, 0x98, 0x51,
+	0xab, 0xf4, 0x08, 0xeb, 0x35, 0x8c, 0x4c, 0x2b, 0xa9, 0x76, 0xa4, 0xf6, 0x52, 0xa6, 0x26, 0x33,
+	0x32, 0x25, 0xea, 0xeb, 0x28, 0x57, 0xef, 0xc0, 0xa4, 0xe8, 0x79, 0x01, 0x17, 0x24, 0x0c, 0xc3,
+	0x83, 0x79, 0x4b, 0x8c, 0x03, 0x49, 0x5a, 0xe2, 0x65, 0xdb, 0x23, 0x37, 0x75, 0x4a, 0x55, 0x11,
+	0x56, 0x4e, 0x72, 0x4c, 0x14, 0x8c, 0x08, 0x9d, 0xe0, 0xc9, 0x32, 0xcc, 0x78, 0x18, 0xfb, 0x1c,
+	0xbd, 0x4e, 0x9c, 0x38, 0x89, 0xaa, 0xc7, 0xb3, 0xd9, 0x1d, 0x20, 0x3a, 0x7c, 0x43, 0x40, 0xe2,
+	0x52, 0x5a, 0x25, 0x9f, 0xc9, 0x22, 0xe4, 0x39, 0x0b, 0xd0, 0x9e, 0x92, 0xe2, 0xcb, 0xe3, 0x06,
+	0x18, 0x65, 0x81, 0x1c, 0x62, 0x82, 0x25, 0x6d, 0x80, 0x10, 0xc3, 0x6d, 0xe4, 0xf1, 0x7d, 0xbf,
+	0x67, 0x4f, 0x4b, 0xe5, 0x7b, 0xe3, 0x94, 0x1b, 0x3d, 0x74, 0x6b, 0x2b, 0x87, 0xb8, 0x48, 0xee,
+	0x91, 0x98, 0xac, 0xc0, 0x05, 0x8e, 0x3b, 0xc8, 0x31, 0x72, 0xd1, 0xeb, 0xe8, 0x99, 0x25, 0x22,
+	0x36, 0x23, 0x23, 0x76, 0x69, 0x78, 0x30, 0x7f, 0x9e, 0x1e, 0x02, 0x7a, 0xbc, 0xc9, 0xf0, 0x9d,
+	0xe7, 0xaf, 0x6c, 0x7b, 0xe4, 0x2b, 0x98, 0x3b, 0x66, 0x4e, 0x8d, 0x18, 0x61, 0xed, 0xac, 0xb4,
+	0x76, 0x71, 0x78, 0x30, 0x4f, 0x8e, 0xac, 0xa9, 0x59, 0x24, 0x8d, 0x11, 0x3e, 0xba, 0x3b, 0x6a,
+	0x4b, 0x75, 0xbf, 0xb0, 0x35, 0x9b, 0x65, 0x4b, 0x8d, 0x89, 0x51, 0x5b, 0x7a, 0x57, 0x34, 0x9f,
+	0x6a, 0xc8, 0x73, 0x69, 0xf1, 0x8b, 0xa7, 0x7a, 0x1e, 0x72, 0xf5, 0xfd, 0xea, 0x1f, 0x39, 0x98,
+	0xbe, 0x27, 0x3e, 0xa3, 0x14, 0xbf, 0xe9, 0x63, 0x9c, 0x90, 0x26, 0x4c, 0x62, 0x94, 0x70, 0x1f,
+	0x63, 0xdb, 0xac, 0x9c, 0xb9, 0x32, 0xb5, 0x78, 0x2d, 0x2b, 0xb6, 0xc7, 0x25, 0xea, 0xa1, 0x19,
+	0x25, 0x7c, 0x9f, 0xa6, 0x5a, 0x72, 0x1b, 0xa6, 0x38, 0xc6, 0xfd, 0x10, 0x3b, 0x3b, 0x9c, 0x85,
+	0x27, 0x7d, 0x6e, 0xee, 0x22, 0x17, 0x03, 0x91, 0x82, 0xe2, 0xbf, 0xe4, 0x2c, 0x24, 0xd7, 0x81,
+	0xf8, 0x91, 0x1b, 0xf4, 0x3d, 0xec, 0xb0, 0xc0, 0xeb, 0xa8, 0x6f, 0xaf, 0x6c, 0xde, 0x02, 0x9d,
+	0xd5, 0x6f, 0xd6, 0x02, 0x4f, 0x0d, 0xb5, 0xd2, 0x0f, 0x26, 0xc0, 0x91, 0x0f, 0x99, 0xf3, 0xe7,
+	0x33, 0xb0, 0x1c, 0x37, 0x11, 0x93, 0x3a, 0x27, 0x0b, 0xe6, 0xad, 0xb1, 0x97, 0x5a, 0x92, 0xd8,
+	0x1d, 0x3f, 0xf2, 0xa8, 0x96, 0x90, 0x9b, 0x30, 0xb9, 0xe3, 0x07, 0x09, 0xf2, 0xd8, 0x3e, 0x23,
+	0x43, 0x72, 0xf9, 0xa4, 0x36, 0xa1, 0x29, 0x5c, 0xfd, 0x2d, 0x8d, 0xed, 0x0a, 0xc6, 0xb1, 0xd3,
+	0x45, 0xf2, 0x05, 0x58, 0x38, 0xc0, 0x28, 0x49, 0x43, 0xfb, 0xee, 0x58, 0x2f, 0xb4, 0xa2, 0xd6,
+	0x14, 0x38, 0xd5, 0x2a, 0xf2, 0x31, 0x4c, 0x0e, 0x54, 0xb4, 0xfe, 0x4b, 0x40, 0x53, 0xb6, 0xf4,
+	0xab, 0x09, 0x13, 0xd2, 0xd0, 0xb1, 0x30, 0x98, 0xaf, 0x1f, 0x86, 0x45, 0xb0, 0x74, 0x22, 0x72,
+	0xe3, 0xbf, 0x3d, 0x2a, 0x25, 0x54, 0x93, 0xe4, 0x13, 0x80, 0x91, 0x04, 0x9e, 0xac, 0x2b, 0xb2,
+	0x34, 0xab, 0x57, 0xff, 0x31, 0xe1, 0xdc, 0x88, 0x2b, 0xe4, 0x06, 0xcc, 0xdd, 0x5b, 0xda, 0x6c,
+	0xb4, 0x3a, 0x4b, 0x8d, 0xcd, 0xf6, 0xda, 0x6a, 0x67, 0x6b, 0xf5, 0xce, 0xea, 0xda, 0xbd, 0xd5,
+	0x59, 0xa3, 0x54, 0x7a, 0xf8, 0xa8, 0x72, 0x71, 0x04, 0xdf, 0x8a, 0x76, 0x23, 0xb6, 0x27, 0x1c,
+	0x3f, 0xff, 0x92, 0xaa, 0x41, 0x9b, 0x4b, 0x9b, 0xcd, 0x59, 0xb3, 0xf4, 0xbf, 0x87, 0x8f, 0x2a,
+	0x17, 0x46, 0x44, 0x0d, 0x8e, 0x6a, 0x32, 0xbd, 0xac, 0xd9, 0x5a, 0x5f, 0x16, 0x9a, 0x5c, 0xa6,
+	0x66, 0xab, 0xe7, 0x65, 0x69, 0x68, 0x73, 0x65, 0xed, 0x6e, 0x73, 0x36, 0x9f, 0xa9, 0xa1, 0x18,
+	0xb2, 0x01, 0x96, 0x2e, 0x7d, 0xf7, 0x53, 0xd9, 0x78, 0xfc, 0x73, 0x79, 0xf4, 0xaa, 0x8b, 0x21,
+	0x4c, 0xc8, 0x2d, 0xe2, 0xa5, 0x8b, 0xca, 0x69, 0x8d, 0x58, 0xaa, 0x9c, 0x56, 0x4f, 0xd5, 0x0b,
+	0xbf, 0xff, 0xf2, 0xf7, 0x8f, 0xb9, 0x73, 0x30, 0x23, 0x89, 0xf7, 0x43, 0x27, 0x72, 0xba, 0xc8,
+	0x3f, 0x30, 0xeb, 0x6f, 0x3f, 0x79, 0x5e, 0x36, 0x9e, 0x3d, 0x2f, 0x1b, 0xdf, 0x0e, 0xcb, 0xe6,
+	0x93, 0x61, 0xd9, 0x7c, 0x3a, 0x2c, 0x9b, 0x7f, 0x0e, 0xcb, 0xe6, 0xf7, 0x2f, 0xca, 0xc6, 0xd3,
+	0x17, 0x65, 0xe3, 0xd9, 0x8b, 0xb2, 0xb1, 0x6d, 0xc9, 0xbf, 0xa0, 0x1f, 0xfd, 0x1b, 0x00, 0x00,
+	0xff, 0xff, 0x36, 0x4b, 0xa7, 0x78, 0x99, 0x0b, 0x00, 0x00,
 }

 type authenticatedWrapperWatchServer struct {

@@ -956,6 +970,12 @@ func (m *Object) CopyFrom(src interface{}) {
 		}
 		github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig())
 		m.Object = &v
+	case *Object_Volume:
+		v := Object_Volume{
+			Volume: &Volume{},
+		}
+		github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume())
+		m.Object = &v
 	}
 }

@@ -1532,6 +1552,27 @@ func (m *Object_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	return len(dAtA) - i, nil
 }
+func (m *Object_Volume) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Object_Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Volume != nil {
+		{
+			size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintWatch(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	return len(dAtA) - i, nil
+}
 func (m *SelectBySlot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
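The 0x52 written above is the protobuf key for this field: keys are encoded as (field_number << 3) | wire_type, and the volume field is number 10 with wire type 2 (length-delimited), so (10 << 3) | 2 == 0x52. A one-line check:

	package main

	import "fmt"

	func main() {
		fmt.Printf("0x%x\n", (10<<3)|2) // prints 0x52
	}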
@@ -2348,6 +2389,18 @@ func (m *Object_Config) Size() (n int) {
 	}
 	return n
 }
+func (m *Object_Volume) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Volume != nil {
+		l = m.Volume.Size()
+		n += 1 + l + sovWatch(uint64(l))
+	}
+	return n
+}
 func (m *SelectBySlot) Size() (n int) {
 	if m == nil {
 		return 0
@@ -2749,6 +2802,16 @@ func (this *Object_Config) String() string {
 	}, "")
 	return s
 }
+func (this *Object_Volume) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Object_Volume{`,
+		`Volume:` + strings.Replace(fmt.Sprintf("%v", this.Volume), "Volume", "Volume", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *SelectBySlot) String() string {
 	if this == nil {
 		return "nil"
@@ -3356,6 +3419,41 @@ func (m *Object) Unmarshal(dAtA []byte) error {
 			}
 			m.Object = &Object_Config{v}
 			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWatch
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthWatch
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthWatch
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Volume{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Object = &Object_Volume{v}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipWatch(dAtA[iNdEx:])
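The msglen loop in case 10 above is standard protobuf varint decoding: each byte contributes seven payload bits, and the high bit marks continuation. The same logic in isolation (an illustrative helper, not taken from the generated file):

	// decodeVarint reads a base-128 varint from b, returning the value and
	// the number of bytes consumed (0 if the input is truncated).
	func decodeVarint(b []byte) (v uint64, n int) {
		for shift := uint(0); n < len(b); shift += 7 {
			c := b[n]
			n++
			v |= uint64(c&0x7F) << shift
			if c < 0x80 {
				return v, n
			}
		}
		return 0, 0
	}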
@@ -19,6 +19,7 @@ message Object {
 		Resource resource = 7;
 		Extension extension = 8;
 		Config config = 9;
+		Volume volume = 10;
 	}
 }

@@ -17,6 +17,17 @@ This package provides various compression algorithms.

 # changelog

+* Feb 22, 2022 (v1.14.4)
+	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+	* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+	* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+	* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+	* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+	* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
 * Jan 25, 2022 (v1.14.2)
 	* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
 	* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
@@ -61,6 +72,9 @@ This package provides various compression algorithms.
 	* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
 	* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)

+<details>
+	<summary>See changes to v1.12.x</summary>
+
 * May 25, 2021 (v1.12.3)
 	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
 	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -82,9 +96,10 @@ This package provides various compression algorithms.
 	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
 	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
 	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+</details>

 <details>
-	<summary>See changes prior to v1.12.1</summary>
+	<summary>See changes to v1.11.x</summary>

 * Mar 26, 2021 (v1.11.13)
 	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -143,7 +158,7 @@ This package provides various compression algorithms.
 </details>

 <details>
-	<summary>See changes prior to v1.11.0</summary>
+	<summary>See changes to v1.10.x</summary>

 * July 8, 2020 (v1.10.11)
 	* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -305,11 +320,6 @@ This package provides various compression algorithms.

 # deflate usage

-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
 The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:

 | old import | new import | Documentation
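A hedged example of the drop-in replacement the README describes: only the import path changes, while the gzip API stays the standard library's.

```go
package main

import (
	"bytes"

	gzip "github.com/klauspost/compress/gzip" // was: "compress/gzip"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf) // same API as compress/gzip
	zw.Write([]byte("hello"))
	zw.Close()
}
```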
@@ -331,6 +341,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
 If you expect to have a lot of concurrently allocated Writers consider using
 the stateless compress described below.

+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
 # Stateless compression

 This package offers stateless compression as a special option for gzip/deflate.
@@ -8,115 +8,10 @@ package huff0

 import (
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 )

-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReader struct {
-	in       []byte
-	off      uint // next byte to read is at in[off - 1]
-	value    uint64
-	bitsRead uint8
-}
-
-// init initializes and resets the bit reader.
-func (b *bitReader) init(in []byte) error {
-	if len(in) < 1 {
-		return errors.New("corrupt stream: too short")
-	}
-	b.in = in
-	b.off = uint(len(in))
-	// The highest bit of the last byte indicates where to start
-	v := in[len(in)-1]
-	if v == 0 {
-		return errors.New("corrupt stream, did not find end of stream")
-	}
-	b.bitsRead = 64
-	b.value = 0
-	if len(in) >= 8 {
-		b.fillFastStart()
-	} else {
-		b.fill()
-		b.fill()
-	}
-	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
-	return nil
-}
-
-// peekBitsFast requires that at least one bit is requested every time.
-// There are no checks if the buffer is filled.
-func (b *bitReader) peekBitsFast(n uint8) uint16 {
-	const regMask = 64 - 1
-	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
-	return v
-}
-
-// fillFast() will make sure at least 32 bits are available.
-// There must be at least 4 bytes available.
-func (b *bitReader) fillFast() {
-	if b.bitsRead < 32 {
-		return
-	}
-
-	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	b.value = (b.value << 32) | uint64(low)
-	b.bitsRead -= 32
-	b.off -= 4
-}
-
-func (b *bitReader) advance(n uint8) {
-	b.bitsRead += n
-}
-
-// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
-func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
-	b.bitsRead = 0
-	b.off -= 8
-}
-
-// fill() will make sure at least 32 bits are available.
-func (b *bitReader) fill() {
-	if b.bitsRead < 32 {
-		return
-	}
-	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-		b.value = (b.value << 32) | uint64(low)
-		b.bitsRead -= 32
-		b.off -= 4
-		return
-	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
-	}
-}
-
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
-}
-
-// close the bitstream and returns an error if out-of-buffer reads occurred.
-func (b *bitReader) close() error {
-	// Release reference.
-	b.in = nil
-	if b.bitsRead > 64 {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-
// bitReader reads a bitstream in reverse.
|
// bitReader reads a bitstream in reverse.
|
||||||
// The last set bit indicates the start of the stream and is used
|
// The last set bit indicates the start of the stream and is used
|
||||||
// for aligning the input.
|
// for aligning the input.
|
||||||
|
@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
|
||||||
return b.off == 0 && b.bitsRead >= 64
|
return b.off == 0 && b.bitsRead >= 64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *bitReaderBytes) remaining() uint {
|
||||||
|
return b.off*8 + uint(64-b.bitsRead)
|
||||||
|
}
|
||||||
|
|
||||||
// close the bitstream and returns an error if out-of-buffer reads occurred.
|
// close the bitstream and returns an error if out-of-buffer reads occurred.
|
||||||
func (b *bitReaderBytes) close() error {
|
func (b *bitReaderBytes) close() error {
|
||||||
// Release reference.
|
// Release reference.
|
||||||
b.in = nil
|
b.in = nil
|
||||||
|
if b.remaining() > 0 {
|
||||||
|
return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
|
||||||
|
}
|
||||||
if b.bitsRead > 64 {
|
if b.bitsRead > 64 {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
@ -318,10 +220,17 @@ func (b *bitReaderShifted) finished() bool {
|
||||||
return b.off == 0 && b.bitsRead >= 64
|
return b.off == 0 && b.bitsRead >= 64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *bitReaderShifted) remaining() uint {
|
||||||
|
return b.off*8 + uint(64-b.bitsRead)
|
||||||
|
}
|
||||||
|
|
||||||
// close the bitstream and returns an error if out-of-buffer reads occurred.
|
// close the bitstream and returns an error if out-of-buffer reads occurred.
|
||||||
func (b *bitReaderShifted) close() error {
|
func (b *bitReaderShifted) close() error {
|
||||||
// Release reference.
|
// Release reference.
|
||||||
b.in = nil
|
b.in = nil
|
||||||
|
if b.remaining() > 0 {
|
||||||
|
return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
|
||||||
|
}
|
||||||
if b.bitsRead > 64 {
|
if b.bitsRead > 64 {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
|
|
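All three reverse bit readers in this file share the framing convention noted in the deleted comments: the encoder terminates the stream with a set bit, and `init` aligns by locating the highest set bit of the final byte. A small standalone sketch of that alignment step, using `math/bits` in place of the package's internal `highBit32`:

```go
package main

import (
	"fmt"
	"math/bits"
)

// paddingBits reports how many high bits of the final stream byte are
// padding, given that the encoder terminates the stream with a set bit.
func paddingBits(lastByte byte) (uint8, error) {
	if lastByte == 0 {
		return 0, fmt.Errorf("corrupt stream, did not find end of stream")
	}
	// bits.Len8 returns the 1-based position of the highest set bit,
	// so 8-bits.Len8 is the number of unused padding bits to skip.
	return uint8(8 - bits.Len8(lastByte)), nil
}

func main() {
	n, err := paddingBits(0b00010110)
	fmt.Println(n, err) // 3 <nil>: skip three padding bits, then read in reverse
}
```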
@@ -2,6 +2,7 @@ package huff0
 
 import (
 	"fmt"
+	"math"
 	"runtime"
 	"sync"
 )
@@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
 		if err != nil {
 			return nil, err
 		}
+		if len(s.Out)-idx > math.MaxUint16 {
+			// We cannot store the size in the jump table
+			return nil, ErrIncompressible
+		}
 		// Write compressed length as little endian before block.
 		if i < 3 {
 			// Last length is not written.
@@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 			return nil, errs[i]
 		}
 		o := s.tmpOut[i]
+		if len(o) > math.MaxUint16 {
+			// We cannot store the size in the jump table
+			return nil, ErrIncompressible
+		}
 		// Write compressed length as little endian before block.
 		if i < 3 {
 			// Last length is not written.
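The `math.MaxUint16` guards above exist because the 4X format's jump table stores the lengths of the first three compressed sub-streams as little-endian `uint16` values, so a sub-stream over 65535 bytes is unrepresentable and the input is reported as incompressible. A hypothetical sketch of that header layout (the helper name and framing are illustrative, not the package's API):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// writeJumpTable prepends the lengths of the first three of four
// sub-streams as little-endian uint16 values, mirroring the guard in
// compress4X above; the fourth length is implied by the total size.
func writeJumpTable(streams [4][]byte) ([]byte, error) {
	out := make([]byte, 6)
	for i := 0; i < 3; i++ {
		if len(streams[i]) > 65535 {
			return nil, errors.New("huff0: sub-stream too large for jump table")
		}
		binary.LittleEndian.PutUint16(out[i*2:], uint16(len(streams[i])))
	}
	for _, s := range streams {
		out = append(out, s...)
	}
	return out, nil
}

func main() {
	blob, err := writeJumpTable([4][]byte{{1, 2}, {3}, {4, 5, 6}, {7}})
	fmt.Println(len(blob), err) // 13 <nil>: 6-byte table + 7 payload bytes
}
```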
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"sync"
 
 	"github.com/klauspost/compress/fse"
 )
@@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder {
 	return &Decoder{
 		dt:             s.dt,
 		actualTableLog: s.actualTableLog,
+		bufs:           &s.decPool,
 	}
 }
 
@@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder {
 type Decoder struct {
 	dt             dTable
 	actualTableLog uint8
+	bufs           *sync.Pool
+}
+
+func (d *Decoder) buffer() *[4][256]byte {
+	buf, ok := d.bufs.Get().(*[4][256]byte)
+	if ok {
+		return buf
+	}
+	return &[4][256]byte{}
 }
 
 // Decompress1X will decompress a 1X encoded stream.
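The `decPool`/`buffer()` change swaps a per-call `var buf [256]byte` scratch array for pooled `[4][256]byte` tables shared via `sync.Pool`. A minimal standalone sketch of the same pattern; note the vendored code calls `Put` explicitly on every return path rather than deferring, presumably to keep defer overhead out of the hot loop:

```go
package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new([4][256]byte) },
}

func decodeWithScratch() int {
	// Borrow a scratch buffer; the pool allocates lazily on first use
	// and reuses returned buffers instead of re-zeroing new ones.
	buf := bufPool.Get().(*[4][256]byte)
	defer bufPool.Put(buf) // return it on every exit path
	buf[0][0] = 42         // ... decode into buf ...
	return int(buf[0][0])
}

func main() {
	fmt.Println(decodeWithScratch())
}
```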
@@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 	dt := d.dt.single[:tlSize]
 
 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	bufs := d.buffer()
+	buf := &bufs[0]
 	var off uint8
 
 	for br.off >= 8 {
@@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 		if off == 0 {
 			if len(dst)+256 > maxDecodedSize {
 				br.close()
+				d.bufs.Put(bufs)
 				return nil, ErrMaxDecodedSizeExceeded
 			}
 			dst = append(dst, buf[:]...)
@@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 	}
 
 	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
 		br.close()
 		return nil, ErrMaxDecodedSizeExceeded
 	}
@@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 			}
 		}
 		if len(dst) >= maxDecodedSize {
+			d.bufs.Put(bufs)
 			br.close()
 			return nil, ErrMaxDecodedSizeExceeded
 		}
@@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 		bitsLeft -= nBits
 		dst = append(dst, uint8(v.entry>>8))
 	}
+	d.bufs.Put(bufs)
 	return dst, br.close()
 }
 
@@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 	dt := d.dt.single[:256]
 
 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	bufs := d.buffer()
+	buf := &bufs[0]
 	var off uint8
 
 	switch d.actualTableLog {
@@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
 					br.close()
+					d.bufs.Put(bufs)
 					return nil, ErrMaxDecodedSizeExceeded
 				}
 				dst = append(dst, buf[:]...)
@@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
 					br.close()
+					d.bufs.Put(bufs)
 					return nil, ErrMaxDecodedSizeExceeded
 				}
 				dst = append(dst, buf[:]...)
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			}
 		}
 	default:
+		d.bufs.Put(bufs)
 		return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
 	}
 
 	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
 		br.close()
 		return nil, ErrMaxDecodedSizeExceeded
 	}
@@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 	}
 	if len(dst) >= maxDecodedSize {
 		br.close()
+		d.bufs.Put(bufs)
 		return nil, ErrMaxDecodedSizeExceeded
 	}
 	v := dt[br.peekByteFast()>>shift]
@@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 		bitsLeft -= int8(nBits)
 		dst = append(dst, uint8(v.entry>>8))
 	}
+	d.bufs.Put(bufs)
 	return dst, br.close()
 }
 
@@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 	dt := d.dt.single[:256]
 
 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	bufs := d.buffer()
+	buf := &bufs[0]
 	var off uint8
 
 	const shift = 56
@@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 		off += 4
 		if off == 0 {
 			if len(dst)+256 > maxDecodedSize {
+				d.bufs.Put(bufs)
 				br.close()
 				return nil, ErrMaxDecodedSizeExceeded
 			}
@@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 	}
 
 	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
 		br.close()
 		return nil, ErrMaxDecodedSizeExceeded
 	}
@@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 		}
 	}
 	if len(dst) >= maxDecodedSize {
+		d.bufs.Put(bufs)
 		br.close()
 		return nil, ErrMaxDecodedSizeExceeded
 	}
@@ -688,6 +721,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 		bitsLeft -= int8(nBits)
 		dst = append(dst, uint8(v.entry>>8))
 	}
+	d.bufs.Put(bufs)
 	return dst, br.close()
 }
 
@@ -707,6 +741,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	}
 
 	var br [4]bitReaderShifted
+	// Decode "jump table"
 	start := 6
 	for i := 0; i < 3; i++ {
 		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
@@ -735,12 +770,12 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	single := d.dt.single[:tlSize]
 
 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	buf := d.buffer()
 	var off uint8
 	var decoded int
 
 	// Decode 2 values from each decoder/loop.
-	const bufoff = 256 / 4
+	const bufoff = 256
 	for {
 		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
 			break
@@ -758,8 +793,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 := single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)
 
 			val = br[stream].peekBitsFast(d.actualTableLog)
 			val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -767,8 +802,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 = single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
 		}
 
 		{
@@ -783,8 +818,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 := single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)
 
 			val = br[stream].peekBitsFast(d.actualTableLog)
 			val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -792,25 +827,26 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 = single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
 		}
 
 		off += 2
 
-		if off == bufoff {
+		if off == 0 {
 			if bufoff > dstEvery {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[:bufoff])
-			copy(out[dstEvery:], buf[bufoff:bufoff*2])
-			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
-			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
-			off = 0
+			copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
 			out = out[bufoff:]
-			decoded += 256
+			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 		}
@@ -818,41 +854,31 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	if off > 0 {
 		ioff := int(off)
 		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
 			return nil, errors.New("corruption detected: stream overrun 3")
 		}
-		copy(out, buf[:off])
-		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
 		decoded += int(off) * 4
 		out = out[off:]
 	}
 
 	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
 	for i := range br {
 		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
 		br := &br[i]
-		bitsLeft := br.off*8 + uint(64-br.bitsRead)
+		bitsLeft := br.remaining()
 		for bitsLeft > 0 {
 			br.fill()
-			if false && br.bitsRead >= 32 {
-				if br.off >= 4 {
-					v := br.in[br.off-4:]
-					v = v[:4]
-					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-					br.value = (br.value << 32) | uint64(low)
-					br.bitsRead -= 32
-					br.off -= 4
-				} else {
-					for br.off > 0 {
-						br.value = (br.value << 8) | uint64(br.in[br.off-1])
-						br.bitsRead -= 8
-						br.off--
-					}
-				}
-			}
-			// end inline...
-			if offset >= len(out) {
+			if offset >= endsAt {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}
 
@@ -865,12 +891,17 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
 		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
			return nil, err
 		}
 	}
+	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
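The added `// Decode "jump table"` comment refers to the 6-byte header whose three little-endian `uint16` lengths locate the four interleaved streams, matching the `length := int(src[i*2]) | (int(src[i*2+1]) << 8)` loop above. A hedged sketch of reading it back (helper name is illustrative):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// splitStreams recovers the four sub-streams from a 4X payload whose
// first six bytes hold the lengths of streams 0-2; stream 3 is the rest.
func splitStreams(src []byte) ([4][]byte, error) {
	var out [4][]byte
	if len(src) < 6 {
		return out, errors.New("huff0: input too small for jump table")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(binary.LittleEndian.Uint16(src[i*2:]))
		if start+length > len(src) {
			return out, errors.New("huff0: corrupt jump table")
		}
		out[i] = src[start : start+length]
		start += length
	}
	out[3] = src[start:]
	return out, nil
}

func main() {
	src := []byte{2, 0, 1, 0, 3, 0, 1, 2, 3, 4, 5, 6, 7}
	streams, err := splitStreams(src)
	fmt.Println(len(streams[0]), len(streams[1]), len(streams[2]), len(streams[3]), err) // 2 1 3 1 <nil>
}
```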
@@ -916,12 +947,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 	single := d.dt.single[:tlSize]
 
 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	buf := d.buffer()
 	var off uint8
 	var decoded int
 
 	// Decode 4 values from each decoder/loop.
-	const bufoff = 256 / 4
+	const bufoff = 256
 	for {
 		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
 			break
@@ -942,8 +973,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
 
 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -951,8 +982,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+1] = uint8(v >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
 
 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -960,8 +991,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+2] = uint8(v >> 8)
-			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
 
 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -969,8 +1000,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
-			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
 		}
 
 		{
@@ -987,8 +1018,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
 
 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -996,8 +1027,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+1] = uint8(v >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
 
 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -1005,8 +1036,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+2] = uint8(v >> 8)
-			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
 
 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -1014,25 +1045,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
-			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
 		}
 
 		off += 4
 
-		if off == bufoff {
+		if off == 0 {
 			if bufoff > dstEvery {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[:bufoff])
-			copy(out[dstEvery:], buf[bufoff:bufoff*2])
-			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
-			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
-			off = 0
+			copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
 			out = out[bufoff:]
-			decoded += 256
+			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 		}
@@ -1040,23 +1072,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 	if off > 0 {
 		ioff := int(off)
 		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
 			return nil, errors.New("corruption detected: stream overrun 3")
 		}
-		copy(out, buf[:off])
-		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
 		decoded += int(off) * 4
 		out = out[off:]
 	}
 
 	// Decode remaining.
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
 	for i := range br {
 		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
 		br := &br[i]
-		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		bitsLeft := br.remaining()
 		for bitsLeft > 0 {
 			if br.finished() {
+				d.bufs.Put(buf)
 				return nil, io.ErrUnexpectedEOF
 			}
 			if br.bitsRead >= 56 {
@@ -1076,7 +1116,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 				}
 			}
 			// end inline...
-			if offset >= len(out) {
+			if offset >= endsAt {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}
 
@@ -1084,16 +1125,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			v := single[uint8(br.value>>shift)].entry
 			nBits := uint8(v)
 			br.advance(nBits)
-			bitsLeft -= int(nBits)
+			bitsLeft -= uint(nBits)
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
 		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
+			d.bufs.Put(buf)
 			return nil, err
 		}
 	}
+	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
@@ -1135,12 +1182,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 	single := d.dt.single[:tlSize]
 
 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	buf := d.buffer()
 	var off uint8
 	var decoded int
 
 	// Decode 4 values from each decoder/loop.
-	const bufoff = 256 / 4
+	const bufoff = 256
 	for {
 		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
 			break
@@ -1150,104 +1197,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			// Interleave 2 decodes.
 			const stream = 0
 			const stream2 = 1
-			br[stream].fillFast()
-			br[stream2].fillFast()
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
 
-			v := single[uint8(br[stream].value>>shift)].entry
-			v2 := single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
 
-			v = single[uint8(br[stream].value>>shift)].entry
-			v2 = single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream+1] = uint8(v >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
 
-			v = single[uint8(br[stream].value>>shift)].entry
-			v2 = single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream+2] = uint8(v >> 8)
-			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
 
-			v = single[uint8(br[stream].value>>shift)].entry
-			v2 = single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream+3] = uint8(v >> 8)
-			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
 		}
 
 		{
 			const stream = 2
 			const stream2 = 3
-			br[stream].fillFast()
-			br[stream2].fillFast()
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
 
-			v := single[uint8(br[stream].value>>shift)].entry
-			v2 := single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
 
-			v = single[uint8(br[stream].value>>shift)].entry
-			v2 = single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream+1] = uint8(v >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
 
-			v = single[uint8(br[stream].value>>shift)].entry
-			v2 = single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream+2] = uint8(v >> 8)
-			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
 
-			v = single[uint8(br[stream].value>>shift)].entry
-			v2 = single[uint8(br[stream2].value>>shift)].entry
-			br[stream].bitsRead += uint8(v)
-			br[stream].value <<= v & 63
-			br[stream2].bitsRead += uint8(v2)
-			br[stream2].value <<= v2 & 63
-			buf[off+bufoff*stream+3] = uint8(v >> 8)
-			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
 		}
 
 		off += 4
 
-		if off == bufoff {
+		if off == 0 {
 			if bufoff > dstEvery {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[:bufoff])
-			copy(out[dstEvery:], buf[bufoff:bufoff*2])
-			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
-			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
-			off = 0
+			copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
 			out = out[bufoff:]
-			decoded += 256
+			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 		}
@@ -1257,21 +1309,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 		if len(out) < dstEvery*3+ioff {
 			return nil, errors.New("corruption detected: stream overrun 3")
 		}
-		copy(out, buf[:off])
-		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
 		decoded += int(off) * 4
 		out = out[off:]
 	}
 
 	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
 	for i := range br {
 		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
 		br := &br[i]
-		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		bitsLeft := br.remaining()
 		for bitsLeft > 0 {
 			if br.finished() {
+				d.bufs.Put(buf)
 				return nil, io.ErrUnexpectedEOF
 			}
 			if br.bitsRead >= 56 {
@@ -1291,7 +1349,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			}
 		}
 		// end inline...
-		if offset >= len(out) {
+		if offset >= endsAt {
+			d.bufs.Put(buf)
 			return nil, errors.New("corruption detected: stream overrun 4")
 		}
 
@@ -1299,16 +1358,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			v := single[br.peekByteFast()].entry
 			nBits := uint8(v)
 			br.advance(nBits)
-			bitsLeft -= int(nBits)
+			bitsLeft -= uint(nBits)
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
 		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
+			d.bufs.Put(buf)
 			return nil, err
 		}
 	}
+	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"math"
 	"math/bits"
+	"sync"
 
 	"github.com/klauspost/compress/fse"
 )
@@ -116,6 +117,7 @@ type Scratch struct {
 	nodes      []nodeElt
 	tmpOut     [4][]byte
 	fse        *fse.Scratch
+	decPool    sync.Pool // *[4][256]byte buffers.
 	huffWeight [maxSymbolValue + 1]byte
 }
 
@@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
 in the future. So if you want to limit concurrency for future updates, specify the concurrency
 you would like.
 
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
+which will compress input as each block is completed, blocking on writes until each has completed.
+
 You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
 compression settings can be specified.
 
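A short sketch of the synchronous-encoder setup the added paragraph describes, using only the documented options:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// WithEncoderConcurrency(1) keeps all compression on the calling
	// goroutine; writes block until each block has been compressed.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		panic(err)
	}
}
```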
@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
 For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
 
 `EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will only run on a same goroutine as the caller.
 
 Encoded blocks can be concatenated and the result will be the combined input stream.
 Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
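For reference, a minimal `EncodeAll`/`DecodeAll` round trip matching the description above:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil) // nil writer: only EncodeAll will be used
	defer enc.Close()
	dec, _ := zstd.NewReader(nil)
	defer dec.Close()

	src := []byte("small block, compressed without streaming")
	compressed := enc.EncodeAll(src, nil) // appends to the provided dst
	out, err := dec.DecodeAll(compressed, nil)
	fmt.Println(string(out), err)
}
```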
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
 }
 ```
 
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+It is important to use the "Close" function when you no longer need the Reader to stop running goroutines,
+when running with default settings.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
+as it is being requested only.
 
 For decoding buffers, it could look something like this:
 
@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
@@ -304,8 +313,11 @@ func Decompress(src []byte) ([]byte, error) {
 
 Both of these cases should provide the functionality needed.
 The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
 It will only allow a certain number of concurrent operations to run.
 To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
 
 ### Dictionaries
 
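A sketch of the concurrent-buffer pattern these options enable; the single shared decoder is reused across goroutines, as the README recommends (`buffers` is assumed to hold pre-compressed zstd blobs):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/klauspost/compress/zstd"
)

var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

func main() {
	var buffers [][]byte // assumed: filled with zstd-compressed data
	var wg sync.WaitGroup
	for _, b := range buffers {
		wg.Add(1)
		go func(b []byte) { // one shared decoder, many goroutines
			defer wg.Done()
			out, err := decoder.DecodeAll(b, nil)
			fmt.Println(len(out), err)
		}(b)
	}
	wg.Wait()
}
```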
@@ -357,18 +369,20 @@ In this case no unneeded allocations should be made.
 The buffer decoder does everything on the same goroutine and does nothing concurrently.
 It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
 
-The stream decoder operates on
+The stream decoder will create goroutines that:
 
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Reads input and splits the input into blocks.
+2) Decompression of literals.
+3) Decompression of sequences.
+4) Reconstruction of output stream.
 
 So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
 
+The concurrency level will, for streams, determine how many blocks ahead the compression will start.
+
 Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
 
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
 
 
 ### Benchmarks
 
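To make the staged pipeline concrete, the usual streaming pattern looks like this; `Close` is what shuts the stage goroutines down again:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// NewReader spins up the staged decode goroutines described above.
	dec, err := zstd.NewReader(os.Stdin)
	if err != nil {
		panic(err)
	}
	defer dec.Close() // stops the stage goroutines
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}
```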
@@ -7,6 +7,7 @@ package zstd
 import (
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 	"math/bits"
 )
@@ -132,6 +133,9 @@ func (b *bitReader) remain() uint {
 func (b *bitReader) close() error {
 	// Release reference.
 	b.in = nil
+	if !b.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+	}
 	if b.bitsRead > 64 {
 		return io.ErrUnexpectedEOF
 	}
 
|
@ -76,16 +76,25 @@ type blockDec struct {
|
||||||
// Window size of the block.
|
// Window size of the block.
|
||||||
WindowSize uint64
|
WindowSize uint64
|
||||||
|
|
||||||
history chan *history
|
err error
|
||||||
input chan struct{}
|
|
||||||
result chan decodeOutput
|
// Check against this crc
|
||||||
err error
|
checkCRC []byte
|
||||||
decWG sync.WaitGroup
|
|
||||||
|
|
||||||
// Frame to use for singlethreaded decoding.
|
// Frame to use for singlethreaded decoding.
|
||||||
// Should not be used by the decoder itself since parent may be another frame.
|
// Should not be used by the decoder itself since parent may be another frame.
|
||||||
localFrame *frameDec
|
localFrame *frameDec
|
||||||
|
|
||||||
|
sequence []seqVals
|
||||||
|
|
||||||
|
async struct {
|
||||||
|
newHist *history
|
||||||
|
literals []byte
|
||||||
|
seqData []byte
|
||||||
|
seqSize int // Size of uncompressed sequences
|
||||||
|
fcs uint64
|
||||||
|
}
|
||||||
|
|
||||||
// Block is RLE, this is the size.
|
// Block is RLE, this is the size.
|
||||||
RLESize uint32
|
RLESize uint32
|
||||||
tmp [4]byte
|
tmp [4]byte
|
||||||
|
@ -108,13 +117,8 @@ func (b *blockDec) String() string {
|
||||||
|
|
||||||
func newBlockDec(lowMem bool) *blockDec {
|
func newBlockDec(lowMem bool) *blockDec {
|
||||||
b := blockDec{
|
b := blockDec{
|
||||||
lowMem: lowMem,
|
lowMem: lowMem,
|
||||||
result: make(chan decodeOutput, 1),
|
|
||||||
input: make(chan struct{}, 1),
|
|
||||||
history: make(chan *history, 1),
|
|
||||||
}
|
}
|
||||||
b.decWG.Add(1)
|
|
||||||
go b.startDecoder()
|
|
||||||
return &b
|
return &b
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -137,6 +141,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	case blockTypeReserved:
 		return ErrReservedBlockType
 	case blockTypeRLE:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
 		b.RLESize = uint32(cSize)
 		if b.lowMem {
 			maxSize = cSize
@@ -158,6 +168,13 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 			return ErrCompressedSizeTooBig
 		}
 	case blockTypeRaw:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
+
 		b.RLESize = 0
 		// We do not need a destination for raw blocks.
 		maxSize = -1
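Both new guards enforce the same invariant: no block, regardless of type, may claim more than the window size or the format's hard block ceiling. A compact restatement, assuming `maxCompressedBlockSize` is the 128 KiB limit used by this package:

```go
package main

import (
	"errors"
	"fmt"
)

const maxCompressedBlockSize = 128 << 10 // spec ceiling, assumed here

var errWindowSizeExceeded = errors.New("window size exceeded")

// checkBlockSize mirrors the guard added for RLE and raw blocks above.
func checkBlockSize(cSize int, windowSize uint64) error {
	if cSize > maxCompressedBlockSize || uint64(cSize) > windowSize {
		return errWindowSizeExceeded
	}
	return nil
}

func main() {
	fmt.Println(checkBlockSize(1<<20, 1<<16)) // window size exceeded
}
```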
@@ -192,85 +209,14 @@ func (b *blockDec) sendErr(err error) {
 	b.Last = true
 	b.Type = blockTypeReserved
 	b.err = err
-	b.input <- struct{}{}
 }

 // Close will release resources.
 // Closed blockDec cannot be reset.
 func (b *blockDec) Close() {
-	close(b.input)
-	close(b.history)
-	close(b.result)
-	b.decWG.Wait()
 }

-// decodeAsync will prepare decoding the block when it receives input.
-// This will separate output and history.
-func (b *blockDec) startDecoder() {
-	defer b.decWG.Done()
-	for range b.input {
-		//println("blockDec: Got block input")
-		switch b.Type {
-		case blockTypeRLE:
-			if cap(b.dst) < int(b.RLESize) {
-				if b.lowMem {
-					b.dst = make([]byte, b.RLESize)
-				} else {
-					b.dst = make([]byte, maxBlockSize)
-				}
-			}
-			o := decodeOutput{
-				d:   b,
-				b:   b.dst[:b.RLESize],
-				err: nil,
-			}
-			v := b.data[0]
-			for i := range o.b {
-				o.b[i] = v
-			}
-			hist := <-b.history
-			hist.append(o.b)
-			b.result <- o
-		case blockTypeRaw:
-			o := decodeOutput{
-				d:   b,
-				b:   b.data,
-				err: nil,
-			}
-			hist := <-b.history
-			hist.append(o.b)
-			b.result <- o
-		case blockTypeCompressed:
-			b.dst = b.dst[:0]
-			err := b.decodeCompressed(nil)
-			o := decodeOutput{
-				d:   b,
-				b:   b.dst,
-				err: err,
-			}
-			if debugDecoder {
-				println("Decompressed to", len(b.dst), "bytes, error:", err)
-			}
-			b.result <- o
-		case blockTypeReserved:
-			// Used for returning errors.
-			<-b.history
-			b.result <- decodeOutput{
-				d:   b,
-				b:   nil,
-				err: b.err,
-			}
-		default:
-			panic("Invalid block type")
-		}
-		if debugDecoder {
-			println("blockDec: Finished block")
-		}
-	}
-}
-
-// decodeAsync will prepare decoding the block when it receives the history.
-// If history is provided, it will not fetch it from the channel.
+// decodeBuf
 func (b *blockDec) decodeBuf(hist *history) error {
 	switch b.Type {
 	case blockTypeRLE:
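The hunk above deletes the per-block decode goroutine and its input/history/result channels; sendErr and Close shrink accordingly, and block decoding becomes a plain method call. For orientation, a toy model of the handshake that was removed (illustrative only, not the library's API):

package main

import "fmt"

// Each block decoder used to be a goroutine fed by an input channel,
// pulling history from a second channel and pushing results to a third.
type block struct{ data []byte }

func startDecoder(input <-chan block, history <-chan []byte, result chan<- []byte) {
	for blk := range input {
		hist := <-history                // wait for the previous block's history
		out := append(hist, blk.data...) // "decode" by appending
		result <- out                    // hand the result (and history) onward
	}
	close(result)
}

func main() {
	input := make(chan block, 1)
	history := make(chan []byte, 1)
	result := make(chan []byte, 1)
	go startDecoder(input, history, result)

	history <- []byte("hist:")
	input <- block{data: []byte("block1")}
	close(input)
	fmt.Println(string(<-result)) // hist:block1
}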
@@ -293,14 +239,23 @@ func (b *blockDec) decodeBuf(hist *history) error {
 		return nil
 	case blockTypeCompressed:
 		saved := b.dst
-		b.dst = hist.b
-		hist.b = nil
+		// Append directly to history
+		if hist.ignoreBuffer == 0 {
+			b.dst = hist.b
+			hist.b = nil
+		} else {
+			b.dst = b.dst[:0]
+		}
 		err := b.decodeCompressed(hist)
 		if debugDecoder {
 			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
 		}
-		hist.b = b.dst
-		b.dst = saved
+		if hist.ignoreBuffer == 0 {
+			hist.b = b.dst
+			b.dst = saved
+		} else {
+			hist.appendKeep(b.dst)
+		}
 		return err
 	case blockTypeReserved:
 		// Used for returning errors.
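decodeBuf can now either borrow the history buffer (hist.ignoreBuffer == 0) so decompression appends in place, or decode into the block's own buffer and copy via appendKeep. A minimal sketch of that borrow-or-copy choice, with stand-in names:

package main

import "fmt"

// When the history buffer may be shared (borrow == false), decode into a
// private buffer and append a copy; otherwise decode straight into the
// history slice to avoid the copy. Not the library's code.
func decodeInto(hist *[]byte, scratch []byte, payload []byte, borrow bool) []byte {
	if borrow {
		dst := append(*hist, payload...) // decode in place, extending history
		*hist = dst
		return dst
	}
	dst := append(scratch[:0], payload...) // private buffer
	*hist = append(*hist, dst...)          // keep a copy in history
	return dst
}

func main() {
	hist := []byte("abc")
	decodeInto(&hist, make([]byte, 0, 16), []byte("def"), true)
	fmt.Println(string(hist)) // abcdef
}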
@@ -310,30 +265,18 @@ func (b *blockDec) decodeBuf(hist *history) error {
 	}
 }

-// decodeCompressed will start decompressing a block.
-// If no history is supplied the decoder will decodeAsync as much as possible
-// before fetching from blockDec.history
-func (b *blockDec) decodeCompressed(hist *history) error {
-	in := b.data
-	delayedHistory := hist == nil
-
-	if delayedHistory {
-		// We must always grab history.
-		defer func() {
-			if hist == nil {
-				<-b.history
-			}
-		}()
-	}
+func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
 	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
 	if len(in) < 2 {
-		return ErrBlockTooSmall
+		return in, ErrBlockTooSmall
 	}

 	litType := literalsBlockType(in[0] & 3)
 	var litRegenSize int
 	var litCompSize int
 	sizeFormat := (in[0] >> 2) & 3
 	var fourStreams bool
+	var literals []byte
 	switch litType {
 	case literalsBlockRaw, literalsBlockRLE:
 		switch sizeFormat {
@@ -349,7 +292,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
 			if len(in) < 3 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
 			in = in[3:]
@@ -360,7 +303,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
 			if len(in) < 3 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
 			litRegenSize = int(n & 1023)
@@ -371,7 +314,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			fourStreams = true
 			if len(in) < 4 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
 			litRegenSize = int(n & 16383)
@@ -381,7 +324,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			fourStreams = true
 			if len(in) < 5 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
 			litRegenSize = int(n & 262143)
@@ -392,13 +335,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 	if debugDecoder {
 		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
 	}
-	var literals []byte
-	var huff *huff0.Scratch
+	if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
+		return in, ErrWindowSizeExceeded
+	}
+
 	switch litType {
 	case literalsBlockRaw:
 		if len(in) < litRegenSize {
 			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
-			return ErrBlockTooSmall
+			return in, ErrBlockTooSmall
 		}
 		literals = in[:litRegenSize]
 		in = in[litRegenSize:]
@@ -406,7 +351,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 	case literalsBlockRLE:
 		if len(in) < 1 {
 			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
-			return ErrBlockTooSmall
+			return in, ErrBlockTooSmall
 		}
 		if cap(b.literalBuf) < litRegenSize {
 			if b.lowMem {
@@ -417,7 +362,6 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 				b.literalBuf = make([]byte, litRegenSize)
 			} else {
 				b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
-
 			}
 		}
 	}
@@ -433,7 +377,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 	case literalsBlockTreeless:
 		if len(in) < litCompSize {
 			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
-			return ErrBlockTooSmall
+			return in, ErrBlockTooSmall
 		}
 		// Store compressed literals, so we defer decoding until we get history.
 		literals = in[:litCompSize]
@@ -441,15 +385,10 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		if debugDecoder {
 			printf("Found %d compressed literals\n", litCompSize)
 		}
-	case literalsBlockCompressed:
-		if len(in) < litCompSize {
-			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
-			return ErrBlockTooSmall
+		huff := hist.huffTree
+		if huff == nil {
+			return in, errors.New("literal block was treeless, but no history was defined")
 		}
-		literals = in[:litCompSize]
-		in = in[litCompSize:]
-		huff = huffDecoderPool.Get().(*huff0.Scratch)
-		var err error
 		// Ensure we have space to store it.
 		if cap(b.literalBuf) < litRegenSize {
 			if b.lowMem {
@@ -458,14 +397,53 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
 			}
 		}
-		if huff == nil {
-			huff = &huff0.Scratch{}
+		var err error
+		// Use our out buffer.
+		huff.MaxDecodedSize = maxCompressedBlockSize
+		if fourStreams {
+			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+		} else {
+			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
 		}
+		// Make sure we don't leak our literals buffer
+		if err != nil {
+			println("decompressing literals:", err)
+			return in, err
+		}
+		if len(literals) != litRegenSize {
+			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+
+	case literalsBlockCompressed:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return in, ErrBlockTooSmall
+		}
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
+			}
+		}
+		huff := hist.huffTree
+		if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
+			huff = huffDecoderPool.Get().(*huff0.Scratch)
+			if huff == nil {
+				huff = &huff0.Scratch{}
+			}
+		}
+		var err error
 		huff, literals, err = huff0.ReadTable(literals, huff)
 		if err != nil {
 			println("reading huffman table:", err)
-			return err
+			return in, err
 		}
+		hist.huffTree = huff
+		huff.MaxDecodedSize = maxCompressedBlockSize
 		// Use our out buffer.
 		if fourStreams {
 			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
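The literals handling above enforces the rule that a treeless block must reuse the Huffman table a previous compressed block stored in the history, while a compressed block installs its own table. A small self-contained sketch of that reuse rule (illustrative types, not the library's):

package main

import (
	"errors"
	"fmt"
)

type huffTable struct{ id int }

type history struct{ huffTree *huffTable }

// A "compressed" literals block carries its own Huffman table and stores
// it in the history; a "treeless" block carries none and must reuse it.
func literalsTable(treeless bool, carried *huffTable, hist *history) (*huffTable, error) {
	if treeless {
		if hist.huffTree == nil {
			return nil, errors.New("literal block was treeless, but no history was defined")
		}
		return hist.huffTree, nil
	}
	hist.huffTree = carried // remember for later treeless blocks
	return carried, nil
}

func main() {
	var hist history
	if _, err := literalsTable(true, nil, &hist); err != nil {
		fmt.Println(err) // no table yet: error
	}
	t, _ := literalsTable(false, &huffTable{id: 1}, &hist)
	t2, _ := literalsTable(true, nil, &hist)
	fmt.Println(t == t2) // true: treeless reused the stored table
}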
@@ -474,24 +452,52 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		}
 		if err != nil {
 			println("decoding compressed literals:", err)
-			return err
+			return in, err
 		}
 		// Make sure we don't leak our literals buffer
 		if len(literals) != litRegenSize {
-			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
 		}
 		if debugDecoder {
 			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
 		}
 	}
+	hist.decoders.literals = literals
+	return in, nil
+}
+
+// decodeCompressed will start decompressing a block.
+func (b *blockDec) decodeCompressed(hist *history) error {
+	in := b.data
+	in, err := b.decodeLiterals(in, hist)
+	if err != nil {
+		return err
+	}
+	err = b.prepareSequences(in, hist)
+	if err != nil {
+		return err
+	}
+	if hist.decoders.nSeqs == 0 {
+		b.dst = append(b.dst, hist.decoders.literals...)
+		return nil
+	}
+	err = hist.decoders.decodeSync(hist)
+	if err != nil {
+		return err
+	}
+	b.dst = hist.decoders.out
+	hist.recentOffsets = hist.decoders.prevOffset
+	return nil
+}
+
+func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 	// Decode Sequences
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
 	if len(in) < 1 {
 		return ErrBlockTooSmall
 	}
+	var nSeqs int
 	seqHeader := in[0]
-	nSeqs := 0
 	switch {
 	case seqHeader == 0:
 		in = in[1:]
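decodeCompressed is now a thin composition: decodeLiterals, then prepareSequences, then a synchronous sequence decode, with a literals-only shortcut when the block carries no sequences; the streaming path can run the same phases on separate goroutines. A minimal sketch of that staging (phase bodies are stand-ins, not the library's code):

package main

import "fmt"

type block struct {
	data, literals, dst []byte
	nSeqs               int
}

func decodeLiterals(b *block) error   { b.literals = b.data; return nil } // phase 1
func prepareSequences(b *block) error { b.nSeqs = 0; return nil }         // phase 2
func decodeSync(b *block) error       { b.dst = b.literals; return nil }  // phase 3

func decodeCompressed(b *block) error {
	if err := decodeLiterals(b); err != nil {
		return err
	}
	if err := prepareSequences(b); err != nil {
		return err
	}
	if b.nSeqs == 0 {
		// Literals-only block: output is just the literals.
		b.dst = append(b.dst, b.literals...)
		return nil
	}
	return decodeSync(b)
}

func main() {
	b := &block{data: []byte("hi")}
	fmt.Println(decodeCompressed(b), string(b.dst)) // <nil> hi
}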
@@ -512,7 +518,8 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		in = in[3:]
 	}

-	var seqs = &sequenceDecs{}
+	var seqs = &hist.decoders
+	seqs.nSeqs = nSeqs
 	if nSeqs > 0 {
 		if len(in) < 1 {
 			return ErrBlockTooSmall
@@ -541,6 +548,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		}
 		switch mode {
 		case compModePredefined:
+			if seq.fse != nil && !seq.fse.preDefined {
+				fseDecoderPool.Put(seq.fse)
+			}
 			seq.fse = &fsePredef[i]
 		case compModeRLE:
 			if br.remain() < 1 {
@@ -548,34 +558,36 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			}
 			v := br.Uint8()
 			br.advance(1)
-			dec := fseDecoderPool.Get().(*fseDecoder)
+			if seq.fse == nil || seq.fse.preDefined {
+				seq.fse = fseDecoderPool.Get().(*fseDecoder)
+			}
 			symb, err := decSymbolValue(v, symbolTableX[i])
 			if err != nil {
 				printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
 				return err
 			}
-			dec.setRLE(symb)
-			seq.fse = dec
+			seq.fse.setRLE(symb)
 			if debugDecoder {
 				printf("RLE set to %+v, code: %v", symb, v)
 			}
 		case compModeFSE:
 			println("Reading table for", tableIndex(i))
-			dec := fseDecoderPool.Get().(*fseDecoder)
-			err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+			if seq.fse == nil || seq.fse.preDefined {
+				seq.fse = fseDecoderPool.Get().(*fseDecoder)
+			}
+			err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
 			if err != nil {
 				println("Read table error:", err)
 				return err
 			}
-			err = dec.transform(symbolTableX[i])
+			err = seq.fse.transform(symbolTableX[i])
 			if err != nil {
 				println("Transform table error:", err)
 				return err
 			}
 			if debugDecoder {
-				println("Read table ok", "symbolLen:", dec.symbolLen)
+				println("Read table ok", "symbolLen:", seq.fse.symbolLen)
 			}
-			seq.fse = dec
 		case compModeRepeat:
 			seq.repeat = true
 		}
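The FSE table handling above reuses a previously pooled decoder in place and only draws from fseDecoderPool when the current table is nil or one of the shared predefined tables, which must never be recycled or mutated. A small sketch of that pooling rule with stand-in names:

package main

import (
	"fmt"
	"sync"
)

type fseDecoder struct{ preDefined bool }

var fsePredef = fseDecoder{preDefined: true}

var fseDecoderPool = sync.Pool{New: func() interface{} { return &fseDecoder{} }}

// nextTable reuses cur if it is a private pooled table, and only draws
// from the pool when cur is nil or one of the shared predefined tables.
func nextTable(cur *fseDecoder) *fseDecoder {
	if cur == nil || cur.preDefined {
		return fseDecoderPool.Get().(*fseDecoder)
	}
	return cur // safe to reuse in place
}

func main() {
	t := nextTable(&fsePredef)
	fmt.Println(t.preDefined)      // false: got a private table
	fmt.Println(nextTable(t) == t) // true: private table reused
}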
@@ -585,140 +597,88 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		}
 		in = br.unread()
 	}

-	// Wait for history.
-	// All time spent after this is critical since it is strictly sequential.
-	if hist == nil {
-		hist = <-b.history
-		if hist.error {
-			return ErrDecoderClosed
-		}
-	}
-
-	// Decode treeless literal block.
-	if litType == literalsBlockTreeless {
-		// TODO: We could send the history early WITHOUT the stream history.
-		// This would allow decoding treeless literals before the byte history is available.
-		// Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
-		// So not much obvious gain here.
-
-		if hist.huffTree == nil {
-			return errors.New("literal block was treeless, but no history was defined")
-		}
-		// Ensure we have space to store it.
-		if cap(b.literalBuf) < litRegenSize {
-			if b.lowMem {
-				b.literalBuf = make([]byte, 0, litRegenSize)
-			} else {
-				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
-			}
-		}
-		var err error
-		// Use our out buffer.
-		huff = hist.huffTree
-		if fourStreams {
-			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
-		} else {
-			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
-		}
-		// Make sure we don't leak our literals buffer
-		if err != nil {
-			println("decompressing literals:", err)
-			return err
-		}
-		if len(literals) != litRegenSize {
-			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
-		}
-	} else {
-		if hist.huffTree != nil && huff != nil {
-			if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
-				huffDecoderPool.Put(hist.huffTree)
-			}
-			hist.huffTree = nil
-		}
-	}
-	if huff != nil {
-		hist.huffTree = huff
-	}
 	if debugDecoder {
-		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+		println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
 	}

 	if nSeqs == 0 {
-		// Decompressed content is defined entirely as Literals Section content.
-		b.dst = append(b.dst, literals...)
-		if delayedHistory {
-			hist.append(literals)
+		if len(b.sequence) > 0 {
+			b.sequence = b.sequence[:0]
 		}
 		return nil
 	}
-
-	seqs, err := seqs.mergeHistory(&hist.decoders)
-	if err != nil {
-		return err
-	}
-	if debugDecoder {
-		println("History merged ok")
-	}
-	br := &bitReader{}
+	br := seqs.br
+	if br == nil {
+		br = &bitReader{}
+	}
 	if err := br.init(in); err != nil {
 		return err
 	}

-	// TODO: Investigate if sending history without decoders are faster.
-	// This would allow the sequences to be decoded async and only have to construct stream history.
-	// If only recent offsets were not transferred, this would be an obvious win.
-	// Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+	if err := seqs.initialize(br, hist, b.dst); err != nil {
+		println("initializing sequences:", err)
+		return err
+	}
+	return nil
+}
+
+func (b *blockDec) decodeSequences(hist *history) error {
+	if cap(b.sequence) < hist.decoders.nSeqs {
+		if b.lowMem {
+			b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
+		} else {
+			b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
+		}
+	}
+	b.sequence = b.sequence[:hist.decoders.nSeqs]
+	if hist.decoders.nSeqs == 0 {
+		hist.decoders.seqSize = len(hist.decoders.literals)
+		return nil
+	}
+	hist.decoders.prevOffset = hist.recentOffsets
+	err := hist.decoders.decode(b.sequence)
+	hist.recentOffsets = hist.decoders.prevOffset
+	return err
+}
+
+func (b *blockDec) executeSequences(hist *history) error {
 	hbytes := hist.b
 	if len(hbytes) > hist.windowSize {
 		hbytes = hbytes[len(hbytes)-hist.windowSize:]
-		// We do not need history any more.
+		// We do not need history anymore.
 		if hist.dict != nil {
 			hist.dict.content = nil
 		}
 	}
-
-	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
-		println("initializing sequences:", err)
-		return err
-	}
-
-	err = seqs.decode(nSeqs, br, hbytes)
+	hist.decoders.windowSize = hist.windowSize
+	hist.decoders.out = b.dst[:0]
+	err := hist.decoders.execute(b.sequence, hbytes)
 	if err != nil {
 		return err
 	}
-	if !br.finished() {
-		return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
-	}
-
-	err = br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-	}
+	return b.updateHistory(hist)
+}
+
+func (b *blockDec) updateHistory(hist *history) error {
 	if len(b.data) > maxCompressedBlockSize {
 		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
 	}
 	// Set output and release references.
-	b.dst = seqs.out
-	seqs.out, seqs.literals, seqs.hist = nil, nil, nil
-
-	if !delayedHistory {
-		// If we don't have delayed history, no need to update.
-		hist.recentOffsets = seqs.prevOffset
-		return nil
-	}
+	b.dst = hist.decoders.out
+	hist.recentOffsets = hist.decoders.prevOffset
+
 	if b.Last {
 		// if last block we don't care about history.
 		println("Last block, no history returned")
 		hist.b = hist.b[:0]
 		return nil
+	} else {
+		hist.append(b.dst)
+		if debugDecoder {
+			println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
+		}
 	}
-	hist.append(b.dst)
-	hist.recentOffsets = seqs.prevOffset
-	if debugDecoder {
-		println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
-	}
-
+	hist.decoders.out, hist.decoders.literals = nil, nil
+
 	return nil
 }
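The hunk above splits sequence work into decodeSequences (entropy decoding into b.sequence) and executeSequences (match expansion against the window) plus updateHistory. The split matters because only execution needs the byte history, so the phases can overlap across blocks. An illustrative sketch with toy types, using the byte-wise overlapping copy characteristic of LZ match expansion:

package main

import "fmt"

type seqVal struct{ litLen, matchLen, offset int }

func decodeSequences(n int) []seqVal { // entropy phase (stand-in)
	seqs := make([]seqVal, n)
	for i := range seqs {
		seqs[i] = seqVal{litLen: 1, matchLen: 2, offset: 1}
	}
	return seqs
}

func executeSequences(seqs []seqVal, literals, hist []byte) []byte { // history phase
	out := append([]byte{}, hist...)
	for _, s := range seqs {
		out = append(out, literals[:s.litLen]...)
		literals = literals[s.litLen:]
		start := len(out) - s.offset
		for i := 0; i < s.matchLen; i++ { // overlapping copy, byte by byte
			out = append(out, out[start+i])
		}
	}
	return out
}

func main() {
	out := executeSequences(decodeSequences(1), []byte("a"), nil)
	fmt.Println(string(out)) // aaa
}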
@@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
 func (r *readerWrapper) readByte() (byte, error) {
 	n2, err := r.r.Read(r.tmp[:1])
 	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
 		return 0, err
 	}
 	if n2 != 1 {
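readByte now converts io.EOF into io.ErrUnexpectedEOF: if a single byte is required mid-structure, running dry is corruption, not a clean end of stream. A small standalone demonstration of the same convention:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func readByte(r io.Reader) (byte, error) {
	var tmp [1]byte
	n, err := r.Read(tmp[:])
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF // mid-structure EOF is an error
		}
		return 0, err
	}
	if n != 1 {
		return 0, io.ErrNoProgress
	}
	return tmp[0], nil
}

func main() {
	_, err := readByte(bytes.NewReader(nil))
	fmt.Println(err) // unexpected EOF
}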
@@ -5,9 +5,13 @@
 package zstd

 import (
-	"errors"
+	"bytes"
+	"context"
+	"encoding/binary"
 	"io"
 	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
 )

 // Decoder provides decoding of zstandard streams.
@@ -22,12 +26,19 @@ type Decoder struct {
 	// Unreferenced decoders, ready for use.
 	decoders chan *blockDec

-	// Streams ready to be decoded.
-	stream chan decodeStream
-
 	// Current read position used for Reader functionality.
 	current decoderState

+	// sync stream decoding
+	syncStream struct {
+		decodedFrame uint64
+		br           readerWrapper
+		enabled      bool
+		inFrame      bool
+	}
+
+	frame *frameDec
+
 	// Custom dictionaries.
 	// Always uses copies.
 	dicts map[uint32]dict
@@ -46,7 +57,10 @@ type decoderState struct {
 	output chan decodeOutput

 	// cancel remaining output.
-	cancel chan struct{}
+	cancel context.CancelFunc
+
+	// crc of current frame
+	crc *xxhash.Digest

 	flushed bool
 }
@@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
 			return nil, err
 		}
 	}
-	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.crc = xxhash.New()
 	d.current.flushed = true

 	if r == nil {
@@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
 				break
 			}
 			if !d.nextBlock(n == 0) {
-				return n, nil
+				return n, d.current.err
 			}
 		}
 	}
@@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error {

 	d.drainOutput()

+	d.syncStream.br.r = nil
 	if r == nil {
 		d.current.err = ErrDecoderNilInput
 		if len(d.current.b) > 0 {
@@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error {
 		}
 		return nil
 	}

-	if d.stream == nil {
-		d.stream = make(chan decodeStream, 1)
-		d.streamWg.Add(1)
-		go d.startStreamDecoder(d.stream)
-	}
-
 	// Remove current block.
+	d.stashDecoder()
 	d.current.decodeOutput = decodeOutput{}
 	d.current.err = nil
-	d.current.cancel = make(chan struct{})
 	d.current.flushed = false
 	d.current.d = nil

-	d.stream <- decodeStream{
-		r:      r,
-		output: d.current.output,
-		cancel: d.current.cancel,
+	// Ensure no-one else is still running...
+	d.streamWg.Wait()
+	if d.frame == nil {
+		d.frame = newFrameDec(d.o)
 	}

+	if d.o.concurrent == 1 {
+		return d.startSyncDecoder(r)
+	}
+
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	ctx, cancel := context.WithCancel(context.Background())
+	d.current.cancel = cancel
+	d.streamWg.Add(1)
+	go d.startStreamDecoder(ctx, r, d.current.output)
+
 	return nil
 }

 // drainOutput will drain the output until errEndOfStream is sent.
 func (d *Decoder) drainOutput() {
 	if d.current.cancel != nil {
-		println("cancelling current")
-		close(d.current.cancel)
+		if debugDecoder {
+			println("cancelling current")
+		}
+		d.current.cancel()
 		d.current.cancel = nil
 	}
 	if d.current.d != nil {
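Cancellation above moves from close(chan struct{}) to context.CancelFunc, so Reset and Close can stop the stream decoder through a standard context instead of a bespoke channel. A minimal sketch of the pattern (names illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func streamWorker(ctx context.Context, done chan<- string) {
	select {
	case <-ctx.Done():
		done <- "worker stopped: " + ctx.Err().Error()
	case <-time.After(time.Second):
		done <- "worker finished"
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan string, 1)
	go streamWorker(ctx, done)
	cancel() // what drainOutput now does via d.current.cancel()
	fmt.Println(<-done)
}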
@@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() {
 			}
 			d.decoders <- v.d
 		}
-		if v.err == errEndOfStream {
-			println("current flushed")
-			d.current.flushed = true
-			return
-		}
 	}
+	d.current.output = nil
+	d.current.flushed = true
 }

 // WriteTo writes data to w until there's no more data to write or when an error occurs.
@@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
 // DecodeAll can be used concurrently.
 // The Decoder concurrency limits will be respected.
 func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
-	if d.current.err == ErrDecoderClosed {
+	if d.decoders == nil {
 		return dst, ErrDecoderClosed
 	}

@@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		}
 		frame.rawInput = nil
 		frame.bBuf = nil
+		if frame.history.decoders.br != nil {
+			frame.history.decoders.br.in = nil
+		}
 		d.decoders <- block
 	}()
 	frame.bBuf = input
@@ -307,27 +328,31 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 	for {
 		frame.history.reset()
 		err := frame.reset(&frame.bBuf)
-		if err == io.EOF {
-			if debugDecoder {
-				println("frame reset return EOF")
+		if err != nil {
+			if err == io.EOF {
+				if debugDecoder {
+					println("frame reset return EOF")
+				}
+				return dst, nil
 			}
-			return dst, nil
+			return dst, err
 		}
 		if frame.DictionaryID != nil {
 			dict, ok := d.dicts[*frame.DictionaryID]
 			if !ok {
 				return nil, ErrUnknownDictionary
 			}
+			if debugDecoder {
+				println("setting dict", frame.DictionaryID)
+			}
 			frame.history.setDict(&dict)
 		}
-		if err != nil {
-			return dst, err
-		}
 		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
 			return dst, ErrDecoderSizeExceeded
 		}
 		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
-			// Never preallocate moe than 1 GB up front.
+			// Never preallocate more than 1 GB up front.
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
 				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
 				copy(dst2, dst)
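DecodeAll preallocates the destination only when the frame declares a content size under 1 GB, so a hostile header cannot force a giant allocation. A sketch of that guard:

package main

import "fmt"

func growDst(dst []byte, frameContentSize uint64) []byte {
	const cap1GB = 1 << 30
	if frameContentSize > 0 && frameContentSize < cap1GB {
		if uint64(cap(dst)-len(dst)) < frameContentSize {
			dst2 := make([]byte, len(dst), len(dst)+int(frameContentSize))
			copy(dst2, dst)
			dst = dst2
		}
	}
	return dst
}

func main() {
	dst := growDst(make([]byte, 0, 8), 1024)
	fmt.Println(len(dst), cap(dst)) // 0 1024
	dst = growDst(nil, 1<<40)       // absurd declared size: no prealloc
	fmt.Println(len(dst), cap(dst)) // 0 0
}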
@@ -368,6 +393,161 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 // If non-blocking mode is used the returned boolean will be false
 // if no data was available without blocking.
 func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+	if d.current.err != nil {
+		// Keep error state.
+		return false
+	}
+	d.current.b = d.current.b[:0]
+
+	// SYNC:
+	if d.syncStream.enabled {
+		if !blocking {
+			return false
+		}
+		ok = d.nextBlockSync()
+		if !ok {
+			d.stashDecoder()
+		}
+		return ok
+	}
+
+	//ASYNC:
+	d.stashDecoder()
+	if blocking {
+		d.current.decodeOutput, ok = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput, ok = <-d.current.output:
+		default:
+			return false
+		}
+	}
+	if !ok {
+		// This should not happen, so signal error state...
+		d.current.err = io.ErrUnexpectedEOF
+		return false
+	}
+	next := d.current.decodeOutput
+	if next.d != nil && next.d.async.newHist != nil {
+		d.current.crc.Reset()
+	}
+	if debugDecoder {
+		var tmp [4]byte
+		binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
+		println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
+	}
+
+	if len(next.b) > 0 {
+		n, err := d.current.crc.Write(next.b)
+		if err == nil {
+			if n != len(next.b) {
+				d.current.err = io.ErrShortWrite
+			}
+		}
+	}
+	if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
+		got := d.current.crc.Sum64()
+		var tmp [4]byte
+		binary.LittleEndian.PutUint32(tmp[:], uint32(got))
+		if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
+			if debugDecoder {
+				println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+			}
+			d.current.err = ErrCRCMismatch
+		} else {
+			if debugDecoder {
+				println("CRC ok", tmp[:])
+			}
+		}
+	}
+
+	return true
+}
+
+func (d *Decoder) nextBlockSync() (ok bool) {
+	if d.current.d == nil {
+		d.current.d = <-d.decoders
+	}
+	for len(d.current.b) == 0 {
+		if !d.syncStream.inFrame {
+			d.frame.history.reset()
+			d.current.err = d.frame.reset(&d.syncStream.br)
+			if d.current.err != nil {
+				return false
+			}
+			if d.frame.DictionaryID != nil {
+				dict, ok := d.dicts[*d.frame.DictionaryID]
+				if !ok {
+					d.current.err = ErrUnknownDictionary
+					return false
+				} else {
+					d.frame.history.setDict(&dict)
+				}
+			}
+			if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
+				d.current.err = ErrDecoderSizeExceeded
+				return false
+			}
+
+			d.syncStream.decodedFrame = 0
+			d.syncStream.inFrame = true
+		}
+		d.current.err = d.frame.next(d.current.d)
+		if d.current.err != nil {
+			return false
+		}
+		d.frame.history.ensureBlock()
+		if debugDecoder {
+			println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
+		}
+		histBefore := len(d.frame.history.b)
+		d.current.err = d.current.d.decodeBuf(&d.frame.history)

+		if d.current.err != nil {
+			println("error after:", d.current.err)
+			return false
+		}
+		d.current.b = d.frame.history.b[histBefore:]
+		if debugDecoder {
+			println("history after:", len(d.frame.history.b))
+		}
+
+		// Check frame size (before CRC)
+		d.syncStream.decodedFrame += uint64(len(d.current.b))
+		if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize {
+			if debugDecoder {
+				printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+			}
+			d.current.err = ErrFrameSizeExceeded
+			return false
+		}
+
+		// Check FCS
+		if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+			if debugDecoder {
+				printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+			}
+			d.current.err = ErrFrameSizeMismatch
+			return false
+		}
+
+		// Update/Check CRC
+		if d.frame.HasCheckSum {
+			d.frame.crc.Write(d.current.b)
+			if d.current.d.Last {
+				d.current.err = d.frame.checkCRC()
+				if d.current.err != nil {
+					println("CRC error:", d.current.err)
+					return false
+				}
+			}
+		}
+		d.syncStream.inFrame = !d.current.d.Last
+	}
+	return true
+}
+
+func (d *Decoder) stashDecoder() {
 	if d.current.d != nil {
 		if debugDecoder {
 			printf("re-adding current decoder %p", d.current.d)
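nextBlock now owns checksum accounting on the read path: every delivered block is written to a running digest, reset at frame start, and compared against the 4 bytes captured from the frame footer. zstd uses xxhash64 truncated to 4 bytes; the sketch below substitutes crc32 so it needs only the standard library:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// verify feeds each output block into a running digest and compares the
// low 4 bytes against the checksum read from the frame footer.
func verify(blocks [][]byte, want [4]byte) error {
	h := crc32.NewIEEE()
	for _, b := range blocks {
		h.Write(b) // hash.Hash.Write never returns an error
	}
	var got [4]byte
	binary.LittleEndian.PutUint32(got[:], h.Sum32())
	if !bytes.Equal(got[:], want[:]) {
		return fmt.Errorf("CRC mismatch: got %x, want %x", got, want)
	}
	return nil
}

func main() {
	blocks := [][]byte{[]byte("hello "), []byte("world")}
	var want [4]byte
	binary.LittleEndian.PutUint32(want[:], crc32.ChecksumIEEE([]byte("hello world")))
	fmt.Println(verify(blocks, want)) // <nil>
}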
@@ -375,24 +555,6 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
 		d.decoders <- d.current.d
 		d.current.d = nil
 	}
-	if d.current.err != nil {
-		// Keep error state.
-		return blocking
-	}
-
-	if blocking {
-		d.current.decodeOutput = <-d.current.output
-	} else {
-		select {
-		case d.current.decodeOutput = <-d.current.output:
-		default:
-			return false
-		}
-	}
-	if debugDecoder {
-		println("got", len(d.current.b), "bytes, error:", d.current.err)
-	}
-	return true
 }

 // Close will release all resources.
@@ -402,10 +564,10 @@ func (d *Decoder) Close() {
 		return
 	}
 	d.drainOutput()
-	if d.stream != nil {
-		close(d.stream)
+	if d.current.cancel != nil {
+		d.current.cancel()
 		d.streamWg.Wait()
-		d.stream = nil
+		d.current.cancel = nil
 	}
 	if d.decoders != nil {
 		close(d.decoders)
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
type decodeStream struct {
|
func (d *Decoder) startSyncDecoder(r io.Reader) error {
|
||||||
r io.Reader
|
d.frame.history.reset()
|
||||||
|
d.syncStream.br = readerWrapper{r: r}
|
||||||
// Blocks ready to be written to output.
|
d.syncStream.inFrame = false
|
||||||
output chan decodeOutput
|
d.syncStream.enabled = true
|
||||||
|
d.syncStream.decodedFrame = 0
|
||||||
// cancel reading from the input
|
return nil
|
||||||
cancel chan struct{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// errEndOfStream indicates that everything from the stream was read.
|
|
||||||
var errEndOfStream = errors.New("end-of-stream")
|
|
||||||
|
|
||||||
// Create Decoder:
|
// Create Decoder:
|
||||||
// Spawn n block decoders. These accept tasks to decode a block.
|
// ASYNC:
|
||||||
// Create goroutine that handles stream processing, this will send history to decoders as they are available.
|
// Spawn 4 go routines.
|
||||||
// Decoders update the history as they decode.
|
// 0: Read frames and decode blocks.
|
||||||
// When a block is returned:
|
// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
|
||||||
// a) history is sent to the next decoder,
|
// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
|
||||||
// b) content written to CRC.
|
// 3: Wait for stream history, execute sequences, send stream history.
|
||||||
// c) return data to WRITER.
|
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
|
||||||
// d) wait for next block to return data.
|
|
||||||
// Once WRITTEN, the decoders reused by the writer frame decoder for re-use.
|
|
||||||
func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
|
|
||||||
defer d.streamWg.Done()
|
defer d.streamWg.Done()
|
||||||
frame := newFrameDec(d.o)
|
br := readerWrapper{r: r}
|
||||||
for stream := range inStream {
|
|
||||||
if debugDecoder {
|
var seqPrepare = make(chan *blockDec, d.o.concurrent)
|
||||||
println("got new stream")
|
var seqDecode = make(chan *blockDec, d.o.concurrent)
|
||||||
|
var seqExecute = make(chan *blockDec, d.o.concurrent)
|
||||||
|
|
||||||
|
// Async 1: Prepare blocks...
|
||||||
|
go func() {
|
||||||
|
var hist history
|
||||||
|
var hasErr bool
|
||||||
|
for block := range seqPrepare {
|
||||||
|
if hasErr {
|
||||||
|
if block != nil {
|
||||||
|
seqDecode <- block
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if block.async.newHist != nil {
|
||||||
|
if debugDecoder {
|
||||||
|
println("Async 1: new history")
|
||||||
|
}
|
||||||
|
hist.reset()
|
||||||
|
if block.async.newHist.dict != nil {
|
||||||
|
hist.setDict(block.async.newHist.dict)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if block.err != nil || block.Type != blockTypeCompressed {
|
||||||
|
hasErr = block.err != nil
|
||||||
|
seqDecode <- block
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
remain, err := block.decodeLiterals(block.data, &hist)
|
||||||
|
block.err = err
|
||||||
|
hasErr = block.err != nil
|
||||||
|
if err == nil {
|
||||||
|
block.async.literals = hist.decoders.literals
|
||||||
|
block.async.seqData = remain
|
||||||
|
} else if debugDecoder {
|
||||||
|
println("decodeLiterals error:", err)
|
||||||
|
}
|
||||||
|
seqDecode <- block
|
||||||
}
|
}
|
||||||
br := readerWrapper{r: stream.r}
|
close(seqDecode)
|
||||||
decodeStream:
|
}()
|
||||||
for {
|
|
||||||
frame.history.reset()
|
// Async 2: Decode sequences...
|
||||||
err := frame.reset(&br)
|
go func() {
|
||||||
if debugDecoder && err != nil {
|
var hist history
|
||||||
println("Frame decoder returned", err)
|
var hasErr bool
|
||||||
|
|
||||||
|
for block := range seqDecode {
|
||||||
|
if hasErr {
|
||||||
|
if block != nil {
|
||||||
|
seqExecute <- block
|
||||||
|
}
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
if err == nil && frame.DictionaryID != nil {
|
if block.async.newHist != nil {
|
||||||
dict, ok := d.dicts[*frame.DictionaryID]
|
if debugDecoder {
|
||||||
if !ok {
|
println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
|
||||||
err = ErrUnknownDictionary
|
}
|
||||||
|
hist.decoders = block.async.newHist.decoders
|
||||||
|
hist.recentOffsets = block.async.newHist.recentOffsets
|
||||||
|
if block.async.newHist.dict != nil {
|
||||||
|
hist.setDict(block.async.newHist.dict)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if block.err != nil || block.Type != blockTypeCompressed {
|
||||||
|
hasErr = block.err != nil
|
||||||
|
seqExecute <- block
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hist.decoders.literals = block.async.literals
|
||||||
|
block.err = block.prepareSequences(block.async.seqData, &hist)
|
||||||
|
if debugDecoder && block.err != nil {
|
||||||
|
println("prepareSequences returned:", block.err)
|
||||||
|
}
|
||||||
|
hasErr = block.err != nil
|
||||||
|
if block.err == nil {
|
||||||
|
block.err = block.decodeSequences(&hist)
|
||||||
|
if debugDecoder && block.err != nil {
|
||||||
|
println("decodeSequences returned:", block.err)
|
||||||
|
}
|
||||||
|
hasErr = block.err != nil
|
||||||
|
// block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
|
||||||
|
block.async.seqSize = hist.decoders.seqSize
|
||||||
|
}
|
||||||
|
seqExecute <- block
|
||||||
|
}
|
||||||
|
close(seqExecute)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
// Async 3: Execute sequences...
|
||||||
|
frameHistCache := d.frame.history.b
|
||||||
|
go func() {
|
||||||
|
var hist history
|
||||||
|
var decodedFrame uint64
|
||||||
|
var fcs uint64
|
||||||
|
var hasErr bool
|
||||||
|
for block := range seqExecute {
|
||||||
|
out := decodeOutput{err: block.err, d: block}
|
||||||
|
if block.err != nil || hasErr {
|
||||||
|
hasErr = true
|
||||||
|
output <- out
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if block.async.newHist != nil {
|
||||||
|
if debugDecoder {
|
||||||
|
println("Async 3: new history")
|
||||||
|
}
|
||||||
|
hist.windowSize = block.async.newHist.windowSize
|
||||||
|
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
|
||||||
|
if block.async.newHist.dict != nil {
|
||||||
|
hist.setDict(block.async.newHist.dict)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cap(hist.b) < hist.allocFrameBuffer {
|
||||||
|
if cap(frameHistCache) >= hist.allocFrameBuffer {
|
||||||
|
hist.b = frameHistCache
|
||||||
|
} else {
|
||||||
|
hist.b = make([]byte, 0, hist.allocFrameBuffer)
|
||||||
|
println("Alloc history sized", hist.allocFrameBuffer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
hist.b = hist.b[:0]
|
||||||
|
fcs = block.async.fcs
|
||||||
|
decodedFrame = 0
|
||||||
|
}
|
||||||
|
do := decodeOutput{err: block.err, d: block}
|
||||||
|
switch block.Type {
|
||||||
|
case blockTypeRLE:
|
||||||
|
if debugDecoder {
|
||||||
|
println("add rle block length:", block.RLESize)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cap(block.dst) < int(block.RLESize) {
|
||||||
|
if block.lowMem {
|
||||||
|
block.dst = make([]byte, block.RLESize)
|
||||||
|
} else {
|
||||||
|
block.dst = make([]byte, maxBlockSize)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
block.dst = block.dst[:block.RLESize]
|
||||||
|
v := block.data[0]
|
||||||
|
for i := range block.dst {
|
||||||
|
block.dst[i] = v
|
||||||
|
}
|
||||||
|
hist.append(block.dst)
|
||||||
|
do.b = block.dst
|
||||||
|
case blockTypeRaw:
|
||||||
|
if debugDecoder {
|
||||||
|
println("add raw block length:", len(block.data))
|
||||||
|
}
|
||||||
|
hist.append(block.data)
|
||||||
|
do.b = block.data
|
||||||
|
case blockTypeCompressed:
|
||||||
|
if debugDecoder {
|
||||||
|
println("execute with history length:", len(hist.b), "window:", hist.windowSize)
|
||||||
|
}
|
||||||
|
hist.decoders.seqSize = block.async.seqSize
|
||||||
|
hist.decoders.literals = block.async.literals
|
||||||
|
do.err = block.executeSequences(&hist)
|
||||||
|
hasErr = do.err != nil
|
||||||
|
if debugDecoder && hasErr {
|
||||||
|
println("executeSequences returned:", do.err)
|
||||||
|
}
|
||||||
|
do.b = block.dst
|
||||||
|
}
|
||||||
|
if !hasErr {
|
||||||
|
decodedFrame += uint64(len(do.b))
|
||||||
|
if fcs > 0 && decodedFrame > fcs {
|
||||||
|
println("fcs exceeded", block.Last, fcs, decodedFrame)
|
||||||
|
do.err = ErrFrameSizeExceeded
|
||||||
|
hasErr = true
|
||||||
|
} else if block.Last && fcs > 0 && decodedFrame != fcs {
|
||||||
|
do.err = ErrFrameSizeMismatch
|
||||||
|
hasErr = true
|
||||||
} else {
|
} else {
|
||||||
frame.history.setDict(&dict)
|
if debugDecoder {
|
||||||
|
println("fcs ok", block.Last, fcs, decodedFrame)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
output <- do
|
||||||
stream.output <- decodeOutput{
|
}
|
||||||
err: err,
|
close(output)
|
||||||
|
frameHistCache = hist.b
|
||||||
|
wg.Done()
|
||||||
|
if debugDecoder {
|
||||||
|
println("decoder goroutines finished")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
decodeStream:
|
||||||
|
for {
|
||||||
|
frame := d.frame
|
||||||
|
if debugDecoder {
|
||||||
|
println("New frame...")
|
||||||
|
}
|
||||||
|
var historySent bool
|
||||||
|
frame.history.reset()
|
||||||
|
err := frame.reset(&br)
|
||||||
|
if debugDecoder && err != nil {
|
||||||
|
println("Frame decoder returned", err)
|
||||||
|
}
|
||||||
|
if err == nil && frame.DictionaryID != nil {
|
||||||
|
dict, ok := d.dicts[*frame.DictionaryID]
|
||||||
|
if !ok {
|
||||||
|
err = ErrUnknownDictionary
|
||||||
|
} else {
|
||||||
|
frame.history.setDict(&dict)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
|
||||||
|
err = ErrDecoderSizeExceeded
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case dec := <-d.decoders:
|
||||||
|
dec.sendErr(err)
|
||||||
|
seqPrepare <- dec
|
||||||
|
}
|
||||||
|
break decodeStream
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go through all blocks of the frame.
|
||||||
|
for {
|
||||||
|
var dec *blockDec
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
break decodeStream
|
||||||
|
case dec = <-d.decoders:
|
||||||
|
// Once we have a decoder, we MUST return it.
|
||||||
|
}
|
||||||
|
err := frame.next(dec)
|
||||||
|
if !historySent {
|
||||||
|
h := frame.history
|
||||||
|
if debugDecoder {
|
||||||
|
println("Alloc History:", h.allocFrameBuffer)
|
||||||
}
|
}
|
||||||
|
dec.async.newHist = &h
|
||||||
|
dec.async.fcs = frame.FrameContentSize
|
||||||
|
historySent = true
|
||||||
|
} else {
|
||||||
|
dec.async.newHist = nil
|
||||||
|
}
|
||||||
|
if debugDecoder && err != nil {
|
||||||
|
println("next block returned error:", err)
|
||||||
|
}
|
||||||
|
dec.err = err
|
||||||
|
dec.checkCRC = nil
|
||||||
|
if dec.Last && frame.HasCheckSum && err == nil {
|
||||||
|
crc, err := frame.rawInput.readSmall(4)
|
||||||
|
if err != nil {
|
||||||
|
println("CRC missing?", err)
|
||||||
|
dec.err = err
|
||||||
|
}
|
||||||
|
var tmp [4]byte
|
||||||
|
copy(tmp[:], crc)
|
||||||
|
dec.checkCRC = tmp[:]
|
||||||
|
if debugDecoder {
|
||||||
|
println("found crc to check:", dec.checkCRC)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = dec.err
|
||||||
|
last := dec.Last
|
||||||
|
seqPrepare <- dec
|
||||||
|
if err != nil {
|
||||||
|
break decodeStream
|
||||||
|
}
|
||||||
|
if last {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if debugDecoder {
|
|
||||||
println("starting frame decoder")
|
|
||||||
}
|
|
||||||
|
|
||||||
// This goroutine will forward history between frames.
|
|
||||||
frame.frameDone.Add(1)
|
|
||||||
frame.initAsync()
|
|
||||||
|
|
||||||
go frame.startDecoder(stream.output)
|
|
||||||
decodeFrame:
|
|
||||||
// Go through all blocks of the frame.
|
|
||||||
for {
|
|
||||||
dec := <-d.decoders
|
|
||||||
select {
|
|
||||||
case <-stream.cancel:
|
|
||||||
if !frame.sendErr(dec, io.EOF) {
|
|
||||||
// To not let the decoder dangle, send it back.
|
|
||||||
stream.output <- decodeOutput{d: dec}
|
|
||||||
}
|
|
||||||
break decodeStream
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
err := frame.next(dec)
|
|
||||||
switch err {
|
|
||||||
case io.EOF:
|
|
||||||
// End of current frame, no error
|
|
||||||
println("EOF on next block")
|
|
||||||
break decodeFrame
|
|
||||||
case nil:
|
|
||||||
continue
|
|
||||||
default:
|
|
||||||
println("block decoder returned", err)
|
|
||||||
break decodeStream
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// All blocks have started decoding, check if there are more frames.
|
|
||||||
println("waiting for done")
|
|
||||||
frame.frameDone.Wait()
|
|
||||||
println("done waiting...")
|
|
||||||
}
|
}
|
||||||
frame.frameDone.Wait()
|
|
||||||
println("Sending EOS")
|
|
||||||
stream.output <- decodeOutput{err: errEndOfStream}
|
|
||||||
}
|
}
|
||||||
|
close(seqPrepare)
|
||||||
|
wg.Wait()
|
||||||
|
d.frame.history.b = frameHistCache
|
||||||
}
|
}
|
||||||
|
|
|
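The rewritten startStreamDecoder above is the core of this change: one goroutine reads frames and three stage goroutines (literals, sequences, execution) pass *blockDec values over channels sized to d.o.concurrent, so several blocks are in flight at once while output order is preserved. A minimal runnable model of that pipeline shape (stage bodies are stand-ins):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const concurrent = 4
	prepare := make(chan int, concurrent)
	decode := make(chan int, concurrent)
	execute := make(chan int, concurrent)
	output := make(chan int, concurrent)

	go func() { // stage 1: literals
		for b := range prepare {
			decode <- b
		}
		close(decode)
	}()
	go func() { // stage 2: sequences
		for b := range decode {
			execute <- b
		}
		close(execute)
	}()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // stage 3: execute + history
		for b := range execute {
			output <- b * b
		}
		close(output)
		wg.Done()
	}()

	go func() { // stage 0: the frame reader
		for i := 1; i <= 3; i++ {
			prepare <- i
		}
		close(prepare)
	}()
	for v := range output {
		fmt.Println(v) // 1 4 9, in block order
	}
	wg.Wait()
}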
@@ -28,6 +28,9 @@ func (o *decoderOptions) setDefault() {
 		concurrent:    runtime.GOMAXPROCS(0),
 		maxWindowSize: MaxWindowSize,
 	}
+	if o.concurrent > 4 {
+		o.concurrent = 4
+	}
 	o.maxDecodedSize = 1 << 63
 }
@@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption {
 	return func(o *decoderOptions) error { o.lowMem = b; return nil }
 }

-// WithDecoderConcurrency will set the concurrency,
-// meaning the maximum number of decoders to run concurrently.
-// The value supplied must be at least 1.
-// By default this will be set to GOMAXPROCS.
+// WithDecoderConcurrency sets the number of created decoders.
+// When decoding block with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
 func WithDecoderConcurrency(n int) DOption {
 	return func(o *decoderOptions) error {
-		if n <= 0 {
+		if n < 0 {
 			return errors.New("concurrency must be at least 1")
 		}
-		o.concurrent = n
+		if n == 0 {
+			o.concurrent = runtime.GOMAXPROCS(0)
+		} else {
+			o.concurrent = n
+		}
 		return nil
 	}
 }
@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
|
||||||
if cap(s.filling) == 0 {
|
if cap(s.filling) == 0 {
|
||||||
s.filling = make([]byte, 0, e.o.blockSize)
|
s.filling = make([]byte, 0, e.o.blockSize)
|
||||||
}
|
}
|
||||||
if cap(s.current) == 0 {
|
if e.o.concurrent > 1 {
|
||||||
s.current = make([]byte, 0, e.o.blockSize)
|
if cap(s.current) == 0 {
|
||||||
}
|
s.current = make([]byte, 0, e.o.blockSize)
|
||||||
if cap(s.previous) == 0 {
|
}
|
||||||
s.previous = make([]byte, 0, e.o.blockSize)
|
if cap(s.previous) == 0 {
|
||||||
|
s.previous = make([]byte, 0, e.o.blockSize)
|
||||||
|
}
|
||||||
|
s.current = s.current[:0]
|
||||||
|
s.previous = s.previous[:0]
|
||||||
|
if s.writing == nil {
|
||||||
|
s.writing = &blockEnc{lowMem: e.o.lowMem}
|
||||||
|
s.writing.init()
|
||||||
|
}
|
||||||
|
s.writing.initNewEncode()
|
||||||
}
|
}
|
||||||
if s.encoder == nil {
|
if s.encoder == nil {
|
||||||
s.encoder = e.o.encoder()
|
s.encoder = e.o.encoder()
|
||||||
}
|
}
|
||||||
if s.writing == nil {
|
|
||||||
s.writing = &blockEnc{lowMem: e.o.lowMem}
|
|
||||||
s.writing.init()
|
|
||||||
}
|
|
||||||
s.writing.initNewEncode()
|
|
||||||
s.filling = s.filling[:0]
|
s.filling = s.filling[:0]
|
||||||
s.current = s.current[:0]
|
|
||||||
s.previous = s.previous[:0]
|
|
||||||
s.encoder.Reset(e.o.dict, false)
|
s.encoder.Reset(e.o.dict, false)
|
||||||
s.headerWritten = false
|
s.headerWritten = false
|
||||||
s.eofWritten = false
|
s.eofWritten = false
|
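Reset now leaves the current/previous block buffers untouched for synchronous (concurrency 1) encoders, so a reused Encoder stays cheap to rebind. A hedged sketch of the Reset-based reuse pattern this serves (passing nil to NewWriter and binding a destination later via Reset is the documented pattern; the rest is illustrative):

    package main

    import (
        "io"
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        enc, err := zstd.NewWriter(nil) // nil writer: destination bound later via Reset
        if err != nil {
            panic(err)
        }

        enc.Reset(os.Stdout) // rebind the same Encoder to a new destination
        if _, err := io.Copy(enc, os.Stdin); err != nil {
            panic(err)
        }
        if err := enc.Close(); err != nil { // flush and finish the frame
            panic(err)
        }
    }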
@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
 		return s.err
 	}
 
+	// SYNC:
+	if e.o.concurrent == 1 {
+		src := s.filling
+		s.nInput += int64(len(s.filling))
+		if debugEncoder {
+			println("Adding sync block,", len(src), "bytes, final:", final)
+		}
+		enc := s.encoder
+		blk := enc.Block()
+		blk.reset(nil)
+		enc.Encode(blk, src)
+		blk.last = final
+		if final {
+			s.eofWritten = true
+		}
+
+		err := errIncompressible
+		// If we got the exact same number of literals as input,
+		// assume the literals cannot be compressed.
+		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		}
+		switch err {
+		case errIncompressible:
+			if debugEncoder {
+				println("Storing incompressible block as raw")
+			}
+			blk.encodeRaw(src)
+			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
+		case nil:
+		default:
+			s.err = err
+			return err
+		}
+		_, s.err = s.w.Write(blk.output)
+		s.nWritten += int64(len(blk.output))
+		s.filling = s.filling[:0]
+		return s.err
+	}
+
 	// Move blocks forward.
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
@@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption {
 // WithEncoderConcurrency will set the concurrency,
 // meaning the maximum number of encoders to run concurrently.
 // The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
 // By default this will be set to GOMAXPROCS.
 func WithEncoderConcurrency(n int) EOption {
 	return func(o *encoderOptions) error {
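Per the amended comment, an encoder concurrency of 1 now routes stream compression through the synchronous path added to nextBlock above. An illustrative sketch (stdin/stdout usage assumed, not part of the diff):

    package main

    import (
        "io"
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        // One encoder goroutine: no async block hand-off, lower memory footprint.
        enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
        if err != nil {
            panic(err)
        }
        if _, err := io.Copy(enc, os.Stdin); err != nil {
            panic(err)
        }
        if err := enc.Close(); err != nil {
            panic(err)
        }
    }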
@@ -8,23 +8,17 @@ import (
 	"bytes"
 	"encoding/hex"
 	"errors"
-	"hash"
 	"io"
-	"sync"
 
 	"github.com/klauspost/compress/zstd/internal/xxhash"
 )
 
 type frameDec struct {
 	o   decoderOptions
-	crc    hash.Hash64
-	offset int64
+	crc *xxhash.Digest
 
 	WindowSize uint64
 
-	// In order queue of blocks being decoded.
-	decoding chan *blockDec
-
 	// Frame history passed between blocks
 	history history
@@ -34,15 +28,10 @@ type frameDec struct {
 	bBuf byteBuf
 
 	FrameContentSize uint64
-	frameDone        sync.WaitGroup
 
 	DictionaryID  *uint32
 	HasCheckSum   bool
 	SingleSegment bool
-
-	// asyncRunning indicates whether the async routine processes input on 'decoding'.
-	asyncRunningMu sync.Mutex
-	asyncRunning   bool
 }
 
 const (
@@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error {
 			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
 		}
 		if debugDecoder {
-			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+			println("Read FCS:", d.FrameContentSize)
 		}
 	}
 
 	// Move this to shared.
 	d.HasCheckSum = fhd&(1<<2) != 0
 	if d.HasCheckSum {
@@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error {
 	}
 	d.history.windowSize = int(d.WindowSize)
 	if d.o.lowMem && d.history.windowSize < maxBlockSize {
-		d.history.maxSize = d.history.windowSize * 2
+		d.history.allocFrameBuffer = d.history.windowSize * 2
+		// TODO: Maybe use FrameContent size
 	} else {
-		d.history.maxSize = d.history.windowSize + maxBlockSize
+		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
 	}
 
+	if debugDecoder {
+		println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
+	}
+
 	// history contains input - maybe we do something
 	d.rawInput = br
 	return nil
@@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error {
 
 // next will start decoding the next block from stream.
 func (d *frameDec) next(block *blockDec) error {
 	if debugDecoder {
-		printf("decoding new block %p:%p", block, block.data)
+		println("decoding new block")
 	}
 	err := block.reset(d.rawInput, d.WindowSize)
 	if err != nil {
 		println("block error:", err)
 		// Signal the frame decoder we have a problem.
-		d.sendErr(block, err)
+		block.sendErr(err)
 		return err
 	}
-	block.input <- struct{}{}
-	if debugDecoder {
-		println("next block:", block)
-	}
-	d.asyncRunningMu.Lock()
-	defer d.asyncRunningMu.Unlock()
-	if !d.asyncRunning {
-		return nil
-	}
-	if block.Last {
-		// We indicate the frame is done by sending io.EOF
-		d.decoding <- block
-		return io.EOF
-	}
-	d.decoding <- block
 	return nil
 }
 
-// sendEOF will queue an error block on the frame.
-// This will cause the frame decoder to return when it encounters the block.
-// Returns true if the decoder was added.
-func (d *frameDec) sendErr(block *blockDec, err error) bool {
-	d.asyncRunningMu.Lock()
-	defer d.asyncRunningMu.Unlock()
-	if !d.asyncRunning {
-		return false
-	}
-
-	println("sending error", err.Error())
-	block.sendErr(err)
-	d.decoding <- block
-	return true
-}
-
 // checkCRC will check the checksum if the frame has one.
 // Will return ErrCRCMismatch if crc check failed, otherwise nil.
 func (d *frameDec) checkCRC() error {
@@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error {
 		return err
 	}
 
-	if !bytes.Equal(tmp[:], want) {
+	if !bytes.Equal(tmp[:], want) && !ignoreCRC {
 		if debugDecoder {
 			println("CRC Check Failed:", tmp[:], "!=", want)
 		}
@@ -352,131 +317,13 @@ func (d *frameDec) checkCRC() error {
 	return nil
 }
 
-func (d *frameDec) initAsync() {
-	if !d.o.lowMem && !d.SingleSegment {
-		// set max extra size history to 2MB.
-		d.history.maxSize = d.history.windowSize + maxBlockSize
-	}
-	// re-alloc if more than one extra block size.
-	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
-		d.history.b = make([]byte, 0, d.history.maxSize)
-	}
-	if cap(d.history.b) < d.history.maxSize {
-		d.history.b = make([]byte, 0, d.history.maxSize)
-	}
-	if cap(d.decoding) < d.o.concurrent {
-		d.decoding = make(chan *blockDec, d.o.concurrent)
-	}
-	if debugDecoder {
-		h := d.history
-		printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
-	}
-	d.asyncRunningMu.Lock()
-	d.asyncRunning = true
-	d.asyncRunningMu.Unlock()
-}
-
-// startDecoder will start decoding blocks and write them to the writer.
-// The decoder will stop as soon as an error occurs or at end of frame.
-// When the frame has finished decoding the *bufio.Reader
-// containing the remaining input will be sent on frameDec.frameDone.
-func (d *frameDec) startDecoder(output chan decodeOutput) {
-	written := int64(0)
-
-	defer func() {
-		d.asyncRunningMu.Lock()
-		d.asyncRunning = false
-		d.asyncRunningMu.Unlock()
-
-		// Drain the currently decoding.
-		d.history.error = true
-	flushdone:
-		for {
-			select {
-			case b := <-d.decoding:
-				b.history <- &d.history
-				output <- <-b.result
-			default:
-				break flushdone
-			}
-		}
-		println("frame decoder done, signalling done")
-		d.frameDone.Done()
-	}()
-	// Get decoder for first block.
-	block := <-d.decoding
-	block.history <- &d.history
-	for {
-		var next *blockDec
-		// Get result
-		r := <-block.result
-		if r.err != nil {
-			println("Result contained error", r.err)
-			output <- r
-			return
-		}
-		if debugDecoder {
-			println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
-			d.offset += int64(len(r.b))
-		}
-		if !block.Last {
-			// Send history to next block
-			select {
-			case next = <-d.decoding:
-				if debugDecoder {
-					println("Sending ", len(d.history.b), "bytes as history")
-				}
-				next.history <- &d.history
-			default:
-				// Wait until we have sent the block, so
-				// other decoders can potentially get the decoder.
-				next = nil
-			}
-		}
-
-		// Add checksum, async to decoding.
-		if d.HasCheckSum {
-			n, err := d.crc.Write(r.b)
-			if err != nil {
-				r.err = err
-				if n != len(r.b) {
-					r.err = io.ErrShortWrite
-				}
-				output <- r
-				return
-			}
-		}
-		written += int64(len(r.b))
-		if d.SingleSegment && uint64(written) > d.FrameContentSize {
-			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
-			r.err = ErrFrameSizeExceeded
-			output <- r
-			return
-		}
-		if block.Last {
-			r.err = d.checkCRC()
-			output <- r
-			return
-		}
-		output <- r
-		if next == nil {
-			// There was no decoder available, we wait for one now that we have sent to the writer.
-			if debugDecoder {
-				println("Sending ", len(d.history.b), " bytes as history")
-			}
-			next = <-d.decoding
-			next.history <- &d.history
-		}
-		block = next
-	}
-}
-
 // runDecoder will create a sync decoder that will decode a block of data.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
 
 	// We use the history for output to avoid copying it.
 	d.history.b = dst
+	d.history.ignoreBuffer = len(dst)
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
 	var err error
@@ -489,7 +336,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 			println("next block:", dec)
 		}
 		err = dec.decodeBuf(&d.history)
-		if err != nil || dec.Last {
+		if err != nil {
 			break
 		}
 		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
@@ -501,10 +348,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 			err = ErrFrameSizeExceeded
 			break
 		}
+		if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+			println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
+			err = ErrFrameSizeExceeded
+			break
+		}
+		if dec.Last {
+			break
+		}
+		if debugDecoder && d.FrameContentSize > 0 {
+			println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
+		}
 	}
 	dst = d.history.b
 	if err == nil {
-		if d.HasCheckSum {
+		if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+			err = ErrFrameSizeMismatch
+		} else if d.HasCheckSum {
 			var n int
 			n, err = d.crc.Write(dst[crcStart:])
 			if err == nil {
@@ -0,0 +1,11 @@
+//go:build gofuzz
+// +build gofuzz
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// ignoreCRC can be used for fuzz testing to ignore CRC values...
+const ignoreCRC = true
@@ -0,0 +1,11 @@
+//go:build !gofuzz
+// +build !gofuzz
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// ignoreCRC can be used for fuzz testing to ignore CRC values...
+const ignoreCRC = false
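The two files above select a single compile-time constant on the gofuzz build tag, so the CRC comparison in checkCRC compiles away entirely under fuzzing, with no runtime flag. A hedged standalone sketch of the same pattern (names are ours; in the real layout the two const declarations live in mutually exclusive tagged files, as above):

    package example

    import "errors"

    var errMismatch = errors.New("checksum mismatch")

    // ignoreChecks mirrors the ignoreCRC pattern: in practice this false
    // declaration would live in a //go:build !fuzz file, with a tagged
    // twin file declaring it true for fuzz builds.
    const ignoreChecks = false

    // check compiles down to a plain comparison; when ignoreChecks is a
    // true constant the guarded branch is dead code and is removed.
    func check(got, want uint32) error {
        if got != want && !ignoreChecks {
            return errMismatch
        }
        return nil
    }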
@@ -10,20 +10,31 @@ import (
 
 // history contains the information transferred between blocks.
 type history struct {
-	b             []byte
+	// Literal decompression
 	huffTree *huff0.Scratch
-	recentOffsets [3]int
+
+	// Sequence decompression
 	decoders      sequenceDecs
-	windowSize    int
-	maxSize       int
-	error         bool
-	dict          *dict
+	recentOffsets [3]int
+
+	// History buffer...
+	b []byte
+
+	// ignoreBuffer is meant to ignore a number of bytes
+	// when checking for matches in history
+	ignoreBuffer int
+
+	windowSize       int
+	allocFrameBuffer int // needed?
+	error            bool
+	dict             *dict
 }
 
 // reset will reset the history to initial state of a frame.
 // The history must already have been initialized to the desired size.
 func (h *history) reset() {
 	h.b = h.b[:0]
+	h.ignoreBuffer = 0
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
 	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
@@ -35,7 +46,7 @@ func (h *history) reset() {
 	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
 		fseDecoderPool.Put(f)
 	}
-	h.decoders = sequenceDecs{}
+	h.decoders = sequenceDecs{br: h.decoders.br}
 	if h.huffTree != nil {
 		if h.dict == nil || h.dict.litEnc != h.huffTree {
 			huffDecoderPool.Put(h.huffTree)
@@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) {
 	h.decoders.litLengths = dict.llDec
 	h.decoders.offsets = dict.ofDec
 	h.decoders.matchLengths = dict.mlDec
+	h.decoders.dict = dict.content
 	h.recentOffsets = dict.offsets
 	h.huffTree = dict.litEnc
 }
@@ -83,6 +95,24 @@ func (h *history) append(b []byte) {
 	copy(h.b[h.windowSize-len(b):], b)
 }
 
+// ensureBlock will ensure there is space for at least one block...
+func (h *history) ensureBlock() {
+	if cap(h.b) < h.allocFrameBuffer {
+		h.b = make([]byte, 0, h.allocFrameBuffer)
+		return
+	}
+
+	avail := cap(h.b) - len(h.b)
+	if avail >= h.windowSize || avail > maxCompressedBlockSize {
+		return
+	}
+	// Move data down so we only have window size left.
+	// We know we have less than window size in b at this point.
+	discard := len(h.b) - h.windowSize
+	copy(h.b, h.b[discard:])
+	h.b = h.b[:h.windowSize]
+}
+
 // append bytes to history without ever discarding anything.
 func (h *history) appendKeep(b []byte) {
 	h.b = append(h.b, b...)
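ensureBlock slides the retained history down in place instead of reallocating once only the trailing window remains interesting. A hedged, self-contained sketch of the same sliding-window move (function and names are ours, not the library's):

    package example

    // keepWindow discards everything but the trailing window bytes of buf,
    // reusing the existing allocation, and reports the discarded count.
    // This mirrors the "move data down" step in history.ensureBlock above.
    func keepWindow(buf []byte, window int) ([]byte, int) {
        if len(buf) <= window {
            return buf, 0
        }
        discard := len(buf) - window
        copy(buf, buf[discard:])
        return buf[:window], discard
    }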
@@ -20,6 +20,10 @@ type seq struct {
 	llCode, mlCode, ofCode uint8
 }
 
+type seqVals struct {
+	ll, ml, mo int
+}
+
 func (s seq) String() string {
 	if s.offset <= 3 {
 		if s.offset == 0 {
@@ -61,16 +65,18 @@ type sequenceDecs struct {
 	offsets      sequenceDec
 	matchLengths sequenceDec
 	prevOffset   [3]int
-	hist         []byte
 	dict         []byte
 	literals     []byte
 	out          []byte
+	nSeqs        int
+	br           *bitReader
+	seqSize      int
 	windowSize   int
 	maxBits      uint8
 }
 
 // initialize all 3 decoders from the stream input.
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
 	if err := s.litLengths.init(br); err != nil {
 		return errors.New("litLengths:" + err.Error())
 	}
@@ -80,8 +86,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
 	if err := s.matchLengths.init(br); err != nil {
 		return errors.New("matchLengths:" + err.Error())
 	}
-	s.literals = literals
-	s.hist = hist.b
+	s.br = br
 	s.prevOffset = hist.recentOffsets
 	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
 	s.windowSize = hist.windowSize
@@ -94,11 +99,254 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
 }
 
 // decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	// Grab full sizes tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+	s.seqSize = 0
+	litRemain := len(s.literals)
+
+	for i := range seqs {
+		var ll, mo, ml int
+		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("WARNING: temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			if br.overread() {
+				if debugDecoder {
+					printf("reading sequence %d, exceeded available data\n", i)
+				}
+				return io.ErrUnexpectedEOF
+			}
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+		// Evaluate.
+		// We might be doing this async, so do it early.
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+		s.seqSize += ll + ml
+		if s.seqSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+		}
+		litRemain -= ll
+		if litRemain < 0 {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
+		}
+		seqs[i] = seqVals{
+			ll: ll,
+			ml: ml,
+			mo: mo,
+		}
+		if i == len(seqs)-1 {
+			// This is the last sequence, so we shouldn't update state.
+			break
+		}
+
+		// Manually inlined, ~ 5-20% faster
+		// Update all 3 states at once. Approx 20% faster.
+		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+		if nBits == 0 {
+			llState = llTable[llState.newState()&maxTableMask]
+			mlState = mlTable[mlState.newState()&maxTableMask]
+			ofState = ofTable[ofState.newState()&maxTableMask]
+		} else {
+			bits := br.get32BitsFast(nBits)
+			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits >> (ofState.nbBits() & 31))
+			lowBits &= bitMask[mlState.nbBits()&15]
+			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+		}
+	}
+	s.seqSize += litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// execute will execute the decoded sequence with the provided history.
+// The sequence must be evaluated before being sent.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize > cap(s.out) {
+		addBytes := s.seqSize + len(s.out)
+		s.out = append(s.out, make([]byte, addBytes)...)
+		s.out = s.out[:len(s.out)-addBytes]
+	}
+
+	if debugDecoder {
+		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+	}
+
+	var t = len(s.out)
+	out := s.out[:t+s.seqSize]
+
+	for _, seq := range seqs {
+		// Add literals
+		copy(out[t:], s.literals[:seq.ll])
+		t += seq.ll
+		s.literals = s.literals[seq.ll:]
+
+		// Copy from dictionary...
+		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+			}
+			end := dictO + seq.ml
+			if end > len(s.dict) {
+				n := len(s.dict) - dictO
+				copy(out[t:], s.dict[dictO:])
+				t += n
+				seq.ml -= n
+			} else {
+				copy(out[t:], s.dict[dictO:end])
+				t += end - dictO
+				continue
+			}
+		}
+
+		// Copy from history.
+		if v := seq.mo - t; v > 0 {
+			// v is the start position in history from end.
+			start := len(hist) - v
+			if seq.ml > v {
+				// Some goes into current block.
+				// Copy remainder of history
+				copy(out[t:], hist[start:])
+				t += v
+				seq.ml -= v
+			} else {
+				copy(out[t:], hist[start:start+seq.ml])
+				t += seq.ml
+				continue
+			}
+		}
+		// We must be in current buffer now
+		if seq.ml > 0 {
+			start := t - seq.mo
+			if seq.ml <= t-start {
+				// No overlap
+				copy(out[t:], out[start:start+seq.ml])
+				t += seq.ml
+				continue
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at the time.
+				src := out[start : start+seq.ml]
+				dst := out[t:]
+				dst = dst[:len(src)]
+				t += len(src)
+				// Destination is the space we just added.
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+	}
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decodeSync(history *history) error {
+	br := s.br
+	seqs := s.nSeqs
 	startSize := len(s.out)
 	// Grab full sizes tables, to avoid bounds checks.
 	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
 	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+	hist := history.b[history.ignoreBuffer:]
+	out := s.out
+
 	for i := seqs - 1; i >= 0; i-- {
 		if br.overread() {
@@ -151,7 +399,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 
 				if temp == 0 {
 					// 0 is not valid; input is corrupted; force offset to 1
-					println("temp was 0")
+					println("WARNING: temp was 0")
 					temp = 1
 				}
 
@@ -176,51 +424,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 		if ll > len(s.literals) {
 			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
 		}
-		size := ll + ml + len(s.out)
+		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
 			return fmt.Errorf("output (%d) bigger than max block size", size)
 		}
-		if size > cap(s.out) {
+		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
 			// but could be if destination slice is too small for sync operations.
 			// over-allocating here can create a large amount of GC pressure so we try to keep
 			// it as contained as possible
-			used := len(s.out) - startSize
+			used := len(out) - startSize
 			addBytes := 256 + ll + ml + used>>2
 			// Clamp to max block size.
 			if used+addBytes > maxBlockSize {
 				addBytes = maxBlockSize - used
 			}
-			s.out = append(s.out, make([]byte, addBytes)...)
-			s.out = s.out[:len(s.out)-addBytes]
+			out = append(out, make([]byte, addBytes)...)
+			out = out[:len(out)-addBytes]
 		}
 		if ml > maxMatchLen {
 			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
 		}
 
 		// Add literals
-		s.out = append(s.out, s.literals[:ll]...)
+		out = append(out, s.literals[:ll]...)
 		s.literals = s.literals[ll:]
-		out := s.out
 
 		if mo == 0 && ml > 0 {
 			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
 		}
 
-		if mo > len(s.out)+len(hist) || mo > s.windowSize {
+		if mo > len(out)+len(hist) || mo > s.windowSize {
 			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
 			}
 
 			// we may be in dictionary.
-			dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
 			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
 			}
 			end := dictO + ml
 			if end > len(s.dict) {
 				out = append(out, s.dict[dictO:]...)
-				mo -= len(s.dict) - dictO
 				ml -= len(s.dict) - dictO
 			} else {
 				out = append(out, s.dict[dictO:end]...)
@@ -231,26 +477,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 
 		// Copy from history.
 		// TODO: Blocks without history could be made to ignore this completely.
-		if v := mo - len(s.out); v > 0 {
+		if v := mo - len(out); v > 0 {
 			// v is the start position in history from end.
-			start := len(s.hist) - v
+			start := len(hist) - v
 			if ml > v {
 				// Some goes into current block.
 				// Copy remainder of history
-				out = append(out, s.hist[start:]...)
-				mo -= v
+				out = append(out, hist[start:]...)
 				ml -= v
 			} else {
-				out = append(out, s.hist[start:start+ml]...)
+				out = append(out, hist[start:start+ml]...)
 				ml = 0
 			}
 		}
 		// We must be in current buffer now
 		if ml > 0 {
-			start := len(s.out) - mo
-			if ml <= len(s.out)-start {
+			start := len(out) - mo
+			if ml <= len(out)-start {
 				// No overlap
-				out = append(out, s.out[start:start+ml]...)
+				out = append(out, out[start:start+ml]...)
 			} else {
 				// Overlapping copy
 				// Extend destination slice and copy one byte at the time.
@@ -264,7 +509,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 				}
 			}
 		}
-		s.out = out
 		if i == 0 {
 			// This is the last sequence, so we shouldn't update state.
 			break
@@ -292,8 +536,8 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 	}
 
 	// Add final literals
-	s.out = append(s.out, s.literals...)
-	return nil
+	s.out = append(out, s.literals...)
+	return br.close()
 }
 
 // update states, at least 27 bits must be available.
@@ -457,36 +701,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
 	s.prevOffset[0] = temp
 	return temp
 }
-
-// mergeHistory will merge history.
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
-	for i := uint(0); i < 3; i++ {
-		var sNew, sHist *sequenceDec
-		switch i {
-		default:
-			// same as "case 0":
-			sNew = &s.litLengths
-			sHist = &hist.litLengths
-		case 1:
-			sNew = &s.offsets
-			sHist = &hist.offsets
-		case 2:
-			sNew = &s.matchLengths
-			sHist = &hist.matchLengths
-		}
-		if sNew.repeat {
-			if sHist.fse == nil {
-				return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
-			}
-			continue
-		}
-		if sNew.fse == nil {
-			return nil, fmt.Errorf("sequence stream %d, no fse found", i)
-		}
-		if sHist.fse != nil && !sHist.fse.preDefined {
-			fseDecoderPool.Put(sHist.fse)
-		}
-		sHist.fse = sNew.fse
-	}
-	return hist, nil
-}
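Both execute above and the sync path fall back to a byte-at-a-time loop for overlapping matches: Go's copy behaves like memmove, which would not replay freshly written bytes the way LZ-style match expansion requires. A standalone sketch of the distinction (names are ours, not the library's):

    package example

    // lzCopy replays length bytes starting offset bytes back, the way a
    // zstd match is expanded. When offset < length, source and destination
    // overlap and each byte must be produced before it is read again, so a
    // plain copy (memmove semantics) would give the wrong result.
    func lzCopy(out []byte, offset, length int) []byte {
        start := len(out) - offset
        for i := 0; i < length; i++ {
            out = append(out, out[start+i])
        }
        return out
    }

    // Example: expanding a match of length 5 at offset 1 over "ab" repeats
    // the final byte: lzCopy([]byte("ab"), 1, 5) == []byte("abbbbbb").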
@@ -75,6 +75,10 @@ var (
 	// This is only returned if SingleSegment is specified on the frame.
 	ErrFrameSizeExceeded = errors.New("frame size exceeded")
 
+	// ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
+	// This is only returned if SingleSegment is specified on the frame.
+	ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
+
 	// ErrCRCMismatch is returned if CRC mismatches.
 	ErrCRCMismatch = errors.New("CRC check failed")
 
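Callers can tell the new mismatch sentinel from the existing overflow one with errors.Is. A minimal sketch (the classification strings are ours):

    package example

    import (
        "errors"

        "github.com/klauspost/compress/zstd"
    )

    // classify maps the two frame-size sentinels onto log-friendly strings.
    func classify(err error) string {
        switch {
        case errors.Is(err, zstd.ErrFrameSizeMismatch):
            return "frame ended at a different size than its header promised"
        case errors.Is(err, zstd.ErrFrameSizeExceeded):
            return "frame grew past the size stated in its header"
        case err != nil:
            return "other decode error"
        }
        return "ok"
    }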
@@ -1,3 +1,4 @@
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris
 
 package user
@@ -120,7 +120,7 @@ func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error)
 
 func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
 	if r == nil {
-		return nil, fmt.Errorf("nil source for passwd-formatted data")
+		return nil, errors.New("nil source for passwd-formatted data")
 	}
 
 	var (
@@ -178,7 +178,7 @@ func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error)
 
 func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
 	if r == nil {
-		return nil, fmt.Errorf("nil source for group-formatted data")
+		return nil, errors.New("nil source for group-formatted data")
 	}
 	rd := bufio.NewReader(r)
 	out := []Group{}
@@ -339,7 +339,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
 		if userArg == "" {
 			userArg = strconv.Itoa(user.Uid)
 		}
-		return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
+		return nil, fmt.Errorf("unable to find user %s: %w", userArg, err)
 	}
 
 	var matchedUserName string
@@ -355,7 +355,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
 
 		if uidErr != nil {
 			// Not numeric.
-			return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
+			return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries)
 		}
 		user.Uid = uidArg
 
@@ -390,7 +390,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
 			return g.Name == groupArg
 		})
 		if err != nil && group != nil {
-			return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
+			return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err)
 		}
 
 		// Only start modifying user.Gid if it is in explicit form.
@@ -404,7 +404,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
 
 			if gidErr != nil {
 				// Not numeric.
-				return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
+				return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries)
 			}
 			user.Gid = gidArg
 
@@ -445,7 +445,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err
 			return false
 		})
 		if err != nil {
-			return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+			return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err)
 		}
 	}
 
@@ -468,7 +468,8 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err
 		if !found {
 			gid, err := strconv.ParseInt(ag, 10, 64)
 			if err != nil {
-				return nil, fmt.Errorf("Unable to find group %s", ag)
+				// Not a numeric ID either.
+				return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries)
 			}
 			// Ensure gid is inside gid range.
 			if gid < minID || gid > maxID {
@@ -521,7 +522,7 @@ func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error)
 
 func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
 	if r == nil {
-		return nil, fmt.Errorf("nil source for subid-formatted data")
+		return nil, errors.New("nil source for subid-formatted data")
 	}
 
 	var (
@@ -574,7 +575,7 @@ func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error)
 
 func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
 	if r == nil {
-		return nil, fmt.Errorf("nil source for idmap-formatted data")
+		return nil, errors.New("nil source for idmap-formatted data")
 	}
 
 	var (
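The switch from %v to %w keeps the sentinel errors (ErrNoPasswdEntries, ErrNoGroupEntries) visible to errors.Is through the wrap chain instead of flattening them into text. A hedged sketch of what that enables (whether a particular lookup helper wraps the sentinel or returns it directly is an assumption here):

    package main

    import (
        "errors"
        "fmt"

        "github.com/opencontainers/runc/libcontainer/user"
    )

    func main() {
        _, err := user.LookupUser("no-such-user")
        // errors.Is sees through any %w-wrapped layers to the sentinel.
        if errors.Is(err, user.ErrNoPasswdEntries) {
            fmt.Println("user is simply absent, not a parse failure")
        }
    }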
@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz
 
 package user
@@ -0,0 +1,170 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftpb
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow
+// treating them in a unified manner.
+type ConfChangeI interface {
+	AsV2() ConfChangeV2
+	AsV1() (ConfChange, bool)
+}
+
+// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2
+// and returns the result along with the corresponding EntryType.
+func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) {
+	var typ EntryType
+	var ccdata []byte
+	var err error
+	if ccv1, ok := c.AsV1(); ok {
+		typ = EntryConfChange
+		ccdata, err = ccv1.Marshal()
+	} else {
+		ccv2 := c.AsV2()
+		typ = EntryConfChangeV2
+		ccdata, err = ccv2.Marshal()
+	}
+	return typ, ccdata, err
+}
+
+// AsV2 returns a V2 configuration change carrying out the same operation.
+func (c ConfChange) AsV2() ConfChangeV2 {
+	return ConfChangeV2{
+		Changes: []ConfChangeSingle{{
+			Type:   c.Type,
+			NodeID: c.NodeID,
+		}},
+		Context: c.Context,
+	}
+}
+
+// AsV1 returns the ConfChange and true.
+func (c ConfChange) AsV1() (ConfChange, bool) {
+	return c, true
+}
+
+// AsV2 is the identity.
+func (c ConfChangeV2) AsV2() ConfChangeV2 { return c }
+
+// AsV1 returns ConfChange{} and false.
+func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false }
+
+// EnterJoint returns two bools. The second bool is true if and only if this
+// config change will use Joint Consensus, which is the case if it contains more
+// than one change or if the use of Joint Consensus was requested explicitly.
+// The first bool can only be true if second one is, and indicates whether the
+// Joint State will be left automatically.
+func (c ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) {
+	// NB: in theory, more config changes could qualify for the "simple"
+	// protocol but it depends on the config on top of which the changes apply.
+	// For example, adding two learners is not OK if both nodes are part of the
+	// base config (i.e. two voters are turned into learners in the process of
+	// applying the conf change). In practice, these distinctions should not
+	// matter, so we keep it simple and use Joint Consensus liberally.
+	if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 {
+		// Use Joint Consensus.
+		var autoLeave bool
+		switch c.Transition {
+		case ConfChangeTransitionAuto:
+			autoLeave = true
+		case ConfChangeTransitionJointImplicit:
+			autoLeave = true
+		case ConfChangeTransitionJointExplicit:
+		default:
+			panic(fmt.Sprintf("unknown transition: %+v", c))
+		}
+		return autoLeave, true
+	}
+	return false, false
+}
+
+// LeaveJoint is true if the configuration change leaves a joint configuration.
+// This is the case if the ConfChangeV2 is zero, with the possible exception of
+// the Context field.
+func (c ConfChangeV2) LeaveJoint() bool {
+	// NB: c is already a copy.
+	c.Context = nil
+	return proto.Equal(&c, &ConfChangeV2{})
+}
+
+// ConfChangesFromString parses a Space-delimited sequence of operations into a
+// slice of ConfChangeSingle. The supported operations are:
+// - vn: make n a voter,
+// - ln: make n a learner,
+// - rn: remove n, and
+// - un: update n.
+func ConfChangesFromString(s string) ([]ConfChangeSingle, error) {
+	var ccs []ConfChangeSingle
+	toks := strings.Split(strings.TrimSpace(s), " ")
+	if toks[0] == "" {
+		toks = nil
+	}
+	for _, tok := range toks {
+		if len(tok) < 2 {
+			return nil, fmt.Errorf("unknown token %s", tok)
+		}
+		var cc ConfChangeSingle
+		switch tok[0] {
+		case 'v':
+			cc.Type = ConfChangeAddNode
+		case 'l':
+			cc.Type = ConfChangeAddLearnerNode
+		case 'r':
+			cc.Type = ConfChangeRemoveNode
+		case 'u':
+			cc.Type = ConfChangeUpdateNode
+		default:
+			return nil, fmt.Errorf("unknown input: %s", tok)
+		}
+		id, err := strconv.ParseUint(tok[1:], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		cc.NodeID = id
+		ccs = append(ccs, cc)
+	}
+	return ccs, nil
+}
+
+// ConfChangesToString is the inverse to ConfChangesFromString.
+func ConfChangesToString(ccs []ConfChangeSingle) string {
+	var buf strings.Builder
+	for i, cc := range ccs {
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		switch cc.Type {
+		case ConfChangeAddNode:
+			buf.WriteByte('v')
+		case ConfChangeAddLearnerNode:
+			buf.WriteByte('l')
+		case ConfChangeRemoveNode:
+			buf.WriteByte('r')
+		case ConfChangeUpdateNode:
+			buf.WriteByte('u')
+		default:
+			buf.WriteString("unknown")
+		}
+		fmt.Fprintf(&buf, "%d", cc.NodeID)
+	}
+	return buf.String()
+}
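ConfChangesFromString and ConfChangesToString round-trip the v/l/r/u shorthand. A minimal sketch:

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/raft/v3/raftpb"
    )

    func main() {
        // "v1 l2 r3": add node 1 as voter, node 2 as learner, remove node 3.
        ccs, err := raftpb.ConfChangesFromString("v1 l2 r3")
        if err != nil {
            panic(err)
        }
        fmt.Println(raftpb.ConfChangesToString(ccs)) // prints: v1 l2 r3
    }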
@@ -0,0 +1,44 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftpb
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+// Equivalent returns a nil error if the inputs describe the same configuration.
+// On mismatch, returns a descriptive error showing the differences.
+func (cs ConfState) Equivalent(cs2 ConfState) error {
+	cs1 := cs
+	orig1, orig2 := cs1, cs2
+	s := func(sl *[]uint64) {
+		*sl = append([]uint64(nil), *sl...)
+		sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] })
+	}
+
+	for _, cs := range []*ConfState{&cs1, &cs2} {
+		s(&cs.Voters)
+		s(&cs.Learners)
+		s(&cs.VotersOutgoing)
+		s(&cs.LearnersNext)
+	}
+
+	if !reflect.DeepEqual(cs1, cs2) {
+		return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2)
+	}
+	return nil
+}
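Equivalent normalizes by sorting copies of each membership slice, so ordering differences are not mismatches. A quick sketch:

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/raft/v3/raftpb"
    )

    func main() {
        a := raftpb.ConfState{Voters: []uint64{3, 1, 2}}
        b := raftpb.ConfState{Voters: []uint64{1, 2, 3}}
        // Prints nil: same voter set, only the ordering differs.
        fmt.Println(a.Equivalent(b))
    }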
File diff suppressed because it is too large
|
@ -0,0 +1,187 @@
syntax = "proto2";
package raftpb;

import "gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_enum_prefix_all) = false;
option (gogoproto.goproto_unkeyed_all) = false;
option (gogoproto.goproto_unrecognized_all) = false;
option (gogoproto.goproto_sizecache_all) = false;

enum EntryType {
	EntryNormal       = 0;
	EntryConfChange   = 1; // corresponds to pb.ConfChange
	EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2
}

message Entry {
	optional uint64    Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
	optional uint64    Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
	optional EntryType Type  = 1 [(gogoproto.nullable) = false];
	optional bytes     Data  = 4;
}

message SnapshotMetadata {
	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
	optional uint64    index      = 2 [(gogoproto.nullable) = false];
	optional uint64    term       = 3 [(gogoproto.nullable) = false];
}

message Snapshot {
	optional bytes            data     = 1;
	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
}

// For description of different message types, see:
// https://pkg.go.dev/go.etcd.io/etcd/raft/v3#hdr-MessageType
enum MessageType {
	MsgHup            = 0;
	MsgBeat           = 1;
	MsgProp           = 2;
	MsgApp            = 3;
	MsgAppResp        = 4;
	MsgVote           = 5;
	MsgVoteResp       = 6;
	MsgSnap           = 7;
	MsgHeartbeat      = 8;
	MsgHeartbeatResp  = 9;
	MsgUnreachable    = 10;
	MsgSnapStatus     = 11;
	MsgCheckQuorum    = 12;
	MsgTransferLeader = 13;
	MsgTimeoutNow     = 14;
	MsgReadIndex      = 15;
	MsgReadIndexResp  = 16;
	MsgPreVote        = 17;
	MsgPreVoteResp    = 18;
}

message Message {
	optional MessageType type = 1 [(gogoproto.nullable) = false];
	optional uint64      to   = 2 [(gogoproto.nullable) = false];
	optional uint64      from = 3 [(gogoproto.nullable) = false];
	optional uint64      term = 4 [(gogoproto.nullable) = false];
	// logTerm is generally used for appending Raft logs to followers. For example,
	// (type=MsgApp,index=100,logTerm=5) means leader appends entries starting at
	// index=101, and the term of entry at index 100 is 5.
	// (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some
	// entries from its leader as it already has an entry with term 5 at index 100.
	optional uint64   logTerm    = 5 [(gogoproto.nullable) = false];
	optional uint64   index      = 6 [(gogoproto.nullable) = false];
	repeated Entry    entries    = 7 [(gogoproto.nullable) = false];
	optional uint64   commit     = 8 [(gogoproto.nullable) = false];
	optional Snapshot snapshot   = 9 [(gogoproto.nullable) = false];
	optional bool     reject     = 10 [(gogoproto.nullable) = false];
	optional uint64   rejectHint = 11 [(gogoproto.nullable) = false];
	optional bytes    context    = 12;
}

message HardState {
	optional uint64 term   = 1 [(gogoproto.nullable) = false];
	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
	optional uint64 commit = 3 [(gogoproto.nullable) = false];
}

// ConfChangeTransition specifies the behavior of a configuration change with
// respect to joint consensus.
enum ConfChangeTransition {
	// Automatically use the simple protocol if possible, otherwise fall back
	// to ConfChangeJointImplicit. Most applications will want to use this.
	ConfChangeTransitionAuto          = 0;
	// Use joint consensus unconditionally, and transition out of them
	// automatically (by proposing a zero configuration change).
	//
	// This option is suitable for applications that want to minimize the time
	// spent in the joint configuration and do not store the joint configuration
	// in the state machine (outside of InitialState).
	ConfChangeTransitionJointImplicit = 1;
	// Use joint consensus and remain in the joint configuration until the
	// application proposes a no-op configuration change. This is suitable for
	// applications that want to explicitly control the transitions, for example
	// to use a custom payload (via the Context field).
	ConfChangeTransitionJointExplicit = 2;
}

message ConfState {
	// The voters in the incoming config. (If the configuration is not joint,
	// then the outgoing config is empty).
	repeated uint64 voters = 1;
	// The learners in the incoming config.
	repeated uint64 learners = 2;
	// The voters in the outgoing config.
	repeated uint64 voters_outgoing = 3;
	// The nodes that will become learners when the outgoing config is removed.
	// These nodes are necessarily currently in nodes_joint (or they would have
	// been added to the incoming config right away).
	repeated uint64 learners_next = 4;
	// If set, the config is joint and Raft will automatically transition into
	// the final config (i.e. remove the outgoing config) when this is safe.
	optional bool auto_leave = 5 [(gogoproto.nullable) = false];
}

enum ConfChangeType {
	ConfChangeAddNode        = 0;
	ConfChangeRemoveNode     = 1;
	ConfChangeUpdateNode     = 2;
	ConfChangeAddLearnerNode = 3;
}

message ConfChange {
	optional ConfChangeType type    = 2 [(gogoproto.nullable) = false];
	optional uint64         node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
	optional bytes          context = 4;

	// NB: this is used only by etcd to thread through a unique identifier.
	// Ideally it should really use the Context instead. No counterpart to
	// this field exists in ConfChangeV2.
	optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"];
}

// ConfChangeSingle is an individual configuration change operation. Multiple
// such operations can be carried out atomically via a ConfChangeV2.
message ConfChangeSingle {
	optional ConfChangeType type    = 1 [(gogoproto.nullable) = false];
	optional uint64         node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
}

// ConfChangeV2 messages initiate configuration changes. They support both the
// simple "one at a time" membership change protocol and full Joint Consensus
// allowing for arbitrary changes in membership.
//
// The supplied context is treated as an opaque payload and can be used to
// attach an action on the state machine to the application of the config change
// proposal. Note that contrary to Joint Consensus as outlined in the Raft
// paper[1], configuration changes become active when they are *applied* to the
// state machine (not when they are appended to the log).
//
// The simple protocol can be used whenever only a single change is made.
//
// Non-simple changes require the use of Joint Consensus, for which two
// configuration changes are run. The first configuration change specifies the
// desired changes and transitions the Raft group into the joint configuration,
// in which quorum requires a majority of both the pre-changes and post-changes
// configuration. Joint Consensus avoids entering fragile intermediate
// configurations that could compromise survivability. For example, without the
// use of Joint Consensus and running across three availability zones with a
// replication factor of three, it is not possible to replace a voter without
// entering an intermediate configuration that does not survive the outage of
// one availability zone.
//
// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
// is used, and assigns the task of leaving the joint configuration either to
// Raft or the application. Leaving the joint configuration is accomplished by
// proposing a ConfChangeV2 with only and optionally the Context field
// populated.
//
// For details on Raft membership changes, see:
//
// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
message ConfChangeV2 {
	optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false];
	repeated ConfChangeSingle     changes    = 2 [(gogoproto.nullable) = false];
	optional bytes                context    = 3;
}
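A short Go sketch (illustrative, using the generated raftpb types): carrying
two membership changes in one ConfChangeV2 requires joint consensus; with an
explicit transition the application later proposes an empty ConfChangeV2 to
leave the joint configuration.

	cc := ConfChangeV2{
		Transition: ConfChangeTransitionJointExplicit,
		Changes: []ConfChangeSingle{
			{Type: ConfChangeAddNode, NodeID: 4},
			{Type: ConfChangeRemoveNode, NodeID: 1},
		},
	}
	_ = cc // in a real program this would go through the raft node's ProposeConfChange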
@ -137,11 +137,13 @@ func trimOWS(x string) string {
 // contains token amongst its comma-separated tokens, ASCII
 // case-insensitively.
 func headerValueContainsToken(v string, token string) bool {
-	v = trimOWS(v)
-	if comma := strings.IndexByte(v, ','); comma != -1 {
-		return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+	for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
+		if tokenEqual(trimOWS(v[:comma]), token) {
+			return true
+		}
+		v = v[comma+1:]
 	}
-	return tokenEqual(v, token)
+	return tokenEqual(trimOWS(v), token)
 }

 // lowerASCII returns the ASCII lowercase version of b.
@ -38,7 +38,7 @@ RUN make
 RUN make install

 WORKDIR /root
-RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN wget https://curl.se/download/curl-7.45.0.tar.gz
 RUN tar -zxvf curl-7.45.0.tar.gz
 WORKDIR /root/curl-7.45.0
 RUN ./configure --with-ssl --with-nghttp2=/usr/local
@ -1,20 +0,0 @@
This is a work-in-progress HTTP/2 implementation for Go.

It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.

Status:

* The server support is pretty good. A few things are missing
  but are being worked on.
* The client work has just started but shares a lot of code
  is coming along much quicker.

Docs are at https://godoc.org/golang.org/x/net/http2

Demo test server at https://http2.golang.org/

Help & bug reports welcome!

Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+
@ -0,0 +1,53 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "strings"

// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
// contains helper functions which may use Unicode-aware functions which would
// otherwise be unsafe and could introduce vulnerabilities if used improperly.

// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if lower(s[i]) != lower(t[i]) {
			return false
		}
	}
	return true
}

// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] < ' ' || s[i] > '~' {
			return false
		}
	}
	return true
}

// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
	if !isASCIIPrint(s) {
		return "", false
	}
	return strings.ToLower(s), true
}
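Behavior sketch for the new helpers (illustrative values; "X-My-Header" and
"naïve-header" are made-up names):

	asciiEqualFold("Content-Type", "content-type") // true
	asciiEqualFold("K", "\u212a")                  // false: the Kelvin sign folds to "k" only under Unicode rules
	l, ok := asciiToLower("X-My-Header")           // "x-my-header", true
	_, ok = asciiToLower("naïve-header")           // "", false: 'ï' is not printable ASCII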
@ -7,13 +7,21 @@
 package http2

 import (
+	"context"
 	"crypto/tls"
+	"errors"
 	"net/http"
 	"sync"
 )

 // ClientConnPool manages a pool of HTTP/2 client connections.
 type ClientConnPool interface {
+	// GetClientConn returns a specific HTTP/2 connection (usually
+	// a TLS-TCP connection) to an HTTP/2 server. On success, the
+	// returned ClientConn accounts for the upcoming RoundTrip
+	// call, so the caller should not omit it. If the caller needs
+	// to, ClientConn.RoundTrip can be called with a bogus
+	// new(http.Request) to release the stream reservation.
 	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
 	MarkDead(*ClientConn)
 }
@ -40,7 +48,7 @@ type clientConnPool struct {
 	conns        map[string][]*ClientConn // key is host:port
 	dialing      map[string]*dialCall     // currently in-flight dials
 	keys         map[*ClientConn][]string
-	addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls
+	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
 }

 func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
@ -52,87 +60,85 @@ const (
 	noDialOnMiss = false
 )

-// shouldTraceGetConn reports whether getClientConn should call any
-// ClientTrace.GetConn hook associated with the http.Request.
-//
-// This complexity is needed to avoid double calls of the GetConn hook
-// during the back-and-forth between net/http and x/net/http2 (when the
-// net/http.Transport is upgraded to also speak http2), as well as support
-// the case where x/net/http2 is being used directly.
-func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
-	// If our Transport wasn't made via ConfigureTransport, always
-	// trace the GetConn hook if provided, because that means the
-	// http2 package is being used directly and it's the one
-	// dialing, as opposed to net/http.
-	if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
-		return true
-	}
-	// Otherwise, only use the GetConn hook if this connection has
-	// been used previously for other requests. For fresh
-	// connections, the net/http package does the dialing.
-	return !st.freshConn
-}
-
 func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+	// TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
 	if isConnectionCloseRequest(req) && dialOnMiss {
 		// It gets its own connection.
 		traceGetConn(req, addr)
 		const singleUse = true
-		cc, err := p.t.dialClientConn(addr, singleUse)
+		cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
 		if err != nil {
 			return nil, err
 		}
 		return cc, nil
 	}
-	p.mu.Lock()
-	for _, cc := range p.conns[addr] {
-		if st := cc.idleState(); st.canTakeNewRequest {
-			if p.shouldTraceGetConn(st) {
-				traceGetConn(req, addr)
+	for {
+		p.mu.Lock()
+		for _, cc := range p.conns[addr] {
+			if cc.ReserveNewRequest() {
+				// When a connection is presented to us by the net/http package,
+				// the GetConn hook has already been called.
+				// Don't call it a second time here.
+				if !cc.getConnCalled {
+					traceGetConn(req, addr)
+				}
+				cc.getConnCalled = false
+				p.mu.Unlock()
+				return cc, nil
 			}
+		}
+		if !dialOnMiss {
 			p.mu.Unlock()
+			return nil, ErrNoCachedConn
+		}
+		traceGetConn(req, addr)
+		call := p.getStartDialLocked(req.Context(), addr)
+		p.mu.Unlock()
+		<-call.done
+		if shouldRetryDial(call, req) {
+			continue
+		}
+		cc, err := call.res, call.err
+		if err != nil {
+			return nil, err
+		}
+		if cc.ReserveNewRequest() {
 			return cc, nil
 		}
 	}
-	if !dialOnMiss {
-		p.mu.Unlock()
-		return nil, ErrNoCachedConn
-	}
-	traceGetConn(req, addr)
-	call := p.getStartDialLocked(addr)
-	p.mu.Unlock()
-	<-call.done
-	return call.res, call.err
 }

 // dialCall is an in-flight Transport dial call to a host.
 type dialCall struct {
 	_ incomparable
 	p *clientConnPool
+	// the context associated with the request
+	// that created this dialCall
+	ctx  context.Context
 	done chan struct{} // closed when done
 	res  *ClientConn   // valid after done is closed
 	err  error         // valid after done is closed
 }

 // requires p.mu is held.
-func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall {
 	if call, ok := p.dialing[addr]; ok {
 		// A dial is already in-flight. Don't start another.
 		return call
 	}
-	call := &dialCall{p: p, done: make(chan struct{})}
+	call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx}
 	if p.dialing == nil {
 		p.dialing = make(map[string]*dialCall)
 	}
 	p.dialing[addr] = call
-	go call.dial(addr)
+	go call.dial(call.ctx, addr)
 	return call
 }

 // run in its own goroutine.
-func (c *dialCall) dial(addr string) {
+func (c *dialCall) dial(ctx context.Context, addr string) {
 	const singleUse = false // shared conn
-	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
+	c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
 	close(c.done)

 	c.p.mu.Lock()
@ -195,6 +201,7 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
 	if err != nil {
 		c.err = err
 	} else {
+		cc.getConnCalled = true // already called by the net/http package
 		p.addConnLocked(key, cc)
 	}
 	delete(p.addConnCalls, key)
@ -276,3 +283,28 @@ type noDialClientConnPool struct{ *clientConnPool }
 func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
 	return p.getClientConn(req, addr, noDialOnMiss)
 }
+
+// shouldRetryDial reports whether the current request should
+// retry dialing after the call finished unsuccessfully, for example
+// if the dial was canceled because of a context cancellation or
+// deadline expiry.
+func shouldRetryDial(call *dialCall, req *http.Request) bool {
+	if call.err == nil {
+		// No error, no need to retry
+		return false
+	}
+	if call.ctx == req.Context() {
+		// If the call has the same context as the request, the dial
+		// should not be retried, since any cancellation will have come
+		// from this request.
+		return false
+	}
+	if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
+		// If the call error is not because of a context cancellation or a deadline expiry,
+		// the dial should not be retried.
+		return false
+	}
+	// Only retry if the error is a context cancellation error or deadline expiry
+	// and the context associated with the call was canceled or expired.
+	return call.ctx.Err() != nil
+}
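The retry logic above matters because concurrent requests coalesce on a single
in-flight dial: if the request that created the dial is canceled mid-dial, the
shared dial fails with a context error even though other waiters are still
live. For those waiters, call.ctx differs from their own req.Context() and
call.ctx.Err() is non-nil, so shouldRetryDial reports true and getClientConn
loops to start a fresh dial.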
@ -53,6 +53,13 @@ func (e ErrCode) String() string {
 	return fmt.Sprintf("unknown error code 0x%x", uint32(e))
 }

+func (e ErrCode) stringToken() string {
+	if s, ok := errCodeName[e]; ok {
+		return s
+	}
+	return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
+}
+
 // ConnectionError is an error that results in the termination of the
 // entire connection.
 type ConnectionError ErrCode
@ -67,6 +74,11 @@ type StreamError struct {
 	Cause error // optional additional detail
 }

+// errFromPeer is a sentinel error value for StreamError.Cause to
+// indicate that the StreamError was sent from the peer over the wire
+// and wasn't locally generated in the Transport.
+var errFromPeer = errors.New("received from peer")
+
 func streamError(id uint32, code ErrCode) StreamError {
 	return StreamError{StreamID: id, Code: code}
 }
@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
 // a frameParser parses a frame given its FrameHeader and payload
 // bytes. The length of payload will always equal fh.Length (which
 // might be 0).
-type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
+type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)

 var frameParsers = map[FrameType]frameParser{
 	FrameData: parseDataFrame,
@ -267,6 +267,11 @@ type Framer struct {
 	lastFrame Frame
 	errDetail error

+	// countError is a non-nil func that's called on a frame parse
+	// error with some unique error path token. It's initialized
+	// from Transport.CountError or Server.CountError.
+	countError func(errToken string)
+
 	// lastHeaderStream is non-zero if the last frame was an
 	// unfinished HEADERS/CONTINUATION.
 	lastHeaderStream uint32
@ -426,6 +431,7 @@ func NewFramer(w io.Writer, r io.Reader) *Framer {
 	fr := &Framer{
 		w: w,
 		r: r,
+		countError: func(string) {},
 		logReads:  logFrameReads,
 		logWrites: logFrameWrites,
 		debugReadLoggerf: log.Printf,
@ -500,7 +506,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
 		return nil, err
 	}
-	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
+	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
 	if err != nil {
 		if ce, ok := err.(connError); ok {
 			return nil, fr.connError(ce.Code, ce.Reason)
@ -588,13 +594,14 @@ func (f *DataFrame) Data() []byte {
 	return f.data
 }

-func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 	if fh.StreamID == 0 {
 		// DATA frames MUST be associated with a stream. If a
 		// DATA frame is received whose stream identifier
 		// field is 0x0, the recipient MUST respond with a
 		// connection error (Section 5.4.1) of type
 		// PROTOCOL_ERROR.
+		countError("frame_data_stream_0")
 		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
 	}
 	f := fc.getDataFrame()
@ -605,6 +612,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 		var err error
 		payload, padSize, err = readByte(payload)
 		if err != nil {
+			countError("frame_data_pad_byte_short")
 			return nil, err
 		}
 	}
@ -613,6 +621,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 		// length of the frame payload, the recipient MUST
 		// treat this as a connection error.
 		// Filed: https://github.com/http2/http2-spec/issues/610
+		countError("frame_data_pad_too_big")
 		return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
 	}
 	f.data = payload[:len(payload)-int(padSize)]
@ -695,7 +704,7 @@ type SettingsFrame struct {
 	p []byte
 }

-func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
 		// When this (ACK 0x1) bit is set, the payload of the
 		// SETTINGS frame MUST be empty. Receipt of a
@ -703,6 +712,7 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 		// field value other than 0 MUST be treated as a
 		// connection error (Section 5.4.1) of type
 		// FRAME_SIZE_ERROR.
+		countError("frame_settings_ack_with_length")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	if fh.StreamID != 0 {
@ -713,14 +723,17 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 		// field is anything other than 0x0, the endpoint MUST
 		// respond with a connection error (Section 5.4.1) of
 		// type PROTOCOL_ERROR.
+		countError("frame_settings_has_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	if len(p)%6 != 0 {
+		countError("frame_settings_mod_6")
 		// Expecting even number of 6 byte settings.
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	f := &SettingsFrame{FrameHeader: fh, p: p}
 	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+		countError("frame_settings_window_size_too_big")
 		// Values above the maximum flow control window size of 2^31 - 1 MUST
 		// be treated as a connection error (Section 5.4.1) of type
 		// FLOW_CONTROL_ERROR.
@ -832,11 +845,13 @@ type PingFrame struct {

 func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }

-func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 	if len(payload) != 8 {
+		countError("frame_ping_length")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	if fh.StreamID != 0 {
+		countError("frame_ping_has_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	f := &PingFrame{FrameHeader: fh}
@ -872,11 +887,13 @@ func (f *GoAwayFrame) DebugData() []byte {
 	return f.debugData
 }

-func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if fh.StreamID != 0 {
+		countError("frame_goaway_has_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	if len(p) < 8 {
+		countError("frame_goaway_short")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	return &GoAwayFrame{
@ -912,7 +929,7 @@ func (f *UnknownFrame) Payload() []byte {
 	return f.p
 }

-func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	return &UnknownFrame{fh, p}, nil
 }
@ -923,8 +940,9 @@ type WindowUpdateFrame struct {
 	Increment uint32 // never read with high bit set
 }

-func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if len(p) != 4 {
+		countError("frame_windowupdate_bad_len")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
@ -936,8 +954,10 @@ func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 		// control window MUST be treated as a connection
 		// error (Section 5.4.1).
 		if fh.StreamID == 0 {
+			countError("frame_windowupdate_zero_inc_conn")
 			return nil, ConnectionError(ErrCodeProtocol)
 		}
+		countError("frame_windowupdate_zero_inc_stream")
 		return nil, streamError(fh.StreamID, ErrCodeProtocol)
 	}
 	return &WindowUpdateFrame{
@ -988,7 +1008,7 @@ func (f *HeadersFrame) HasPriority() bool {
 	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
 }

-func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 	hf := &HeadersFrame{
 		FrameHeader: fh,
 	}
@ -997,11 +1017,13 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 		// is received whose stream identifier field is 0x0, the recipient MUST
 		// respond with a connection error (Section 5.4.1) of type
 		// PROTOCOL_ERROR.
+		countError("frame_headers_zero_stream")
 		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
 	}
 	var padLength uint8
 	if fh.Flags.Has(FlagHeadersPadded) {
 		if p, padLength, err = readByte(p); err != nil {
+			countError("frame_headers_pad_short")
 			return
 		}
 	}
@ -1009,16 +1031,19 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 		var v uint32
 		p, v, err = readUint32(p)
 		if err != nil {
+			countError("frame_headers_prio_short")
 			return nil, err
 		}
 		hf.Priority.StreamDep = v & 0x7fffffff
 		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
 		p, hf.Priority.Weight, err = readByte(p)
 		if err != nil {
+			countError("frame_headers_prio_weight_short")
 			return nil, err
 		}
 	}
-	if len(p)-int(padLength) <= 0 {
+	if len(p)-int(padLength) < 0 {
+		countError("frame_headers_pad_too_big")
 		return nil, streamError(fh.StreamID, ErrCodeProtocol)
 	}
 	hf.headerFragBuf = p[:len(p)-int(padLength)]
@ -1125,11 +1150,13 @@ func (p PriorityParam) IsZero() bool {
 	return p == PriorityParam{}
 }

-func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 	if fh.StreamID == 0 {
+		countError("frame_priority_zero_stream")
 		return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
 	}
 	if len(payload) != 5 {
+		countError("frame_priority_bad_length")
 		return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
 	}
 	v := binary.BigEndian.Uint32(payload[:4])
@ -1172,11 +1199,13 @@ type RSTStreamFrame struct {
 	ErrCode ErrCode
 }

-func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if len(p) != 4 {
+		countError("frame_rststream_bad_len")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	if fh.StreamID == 0 {
+		countError("frame_rststream_zero_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
@ -1202,8 +1231,9 @@ type ContinuationFrame struct {
 	headerFragBuf []byte
 }

-func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if fh.StreamID == 0 {
+		countError("frame_continuation_zero_stream")
 		return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
 	}
 	return &ContinuationFrame{fh, p}, nil
@ -1252,7 +1282,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
 	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
 }

-func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 	pp := &PushPromiseFrame{
 		FrameHeader: fh,
 	}
@ -1263,6 +1293,7 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 		// with. If the stream identifier field specifies the value
 		// 0x0, a recipient MUST respond with a connection error
 		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		countError("frame_pushpromise_zero_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	// The PUSH_PROMISE frame includes optional padding.
@ -1270,18 +1301,21 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 	var padLength uint8
 	if fh.Flags.Has(FlagPushPromisePadded) {
 		if p, padLength, err = readByte(p); err != nil {
+			countError("frame_pushpromise_pad_short")
 			return
 		}
 	}

 	p, pp.PromiseID, err = readUint32(p)
 	if err != nil {
+		countError("frame_pushpromise_promiseid_short")
 		return
 	}
 	pp.PromiseID = pp.PromiseID & (1<<31 - 1)

 	if int(padLength) > len(p) {
 		// like the DATA frame, error out if padding is longer than the body.
+		countError("frame_pushpromise_pad_too_big")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	pp.headerFragBuf = p[:len(p)-int(padLength)]
@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.15
// +build go1.15

package http2

import (
	"context"
	"crypto/tls"
)

// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
	dialer := &tls.Dialer{
		Config: cfg,
	}
	cn, err := dialer.DialContext(ctx, network, addr)
	if err != nil {
		return nil, err
	}
	tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
	return tlsCn, nil
}
@ -6,7 +6,6 @@ package http2

 import (
 	"net/http"
-	"strings"
 	"sync"
 )

@ -79,10 +78,10 @@ func buildCommonHeaderMaps() {
 	}
 }

-func lowerHeader(v string) string {
+func lowerHeader(v string) (lower string, ascii bool) {
 	buildCommonHeaderMapsOnce()
 	if s, ok := commonLowerHeader[v]; ok {
-		return s
+		return s, true
 	}
-	return strings.ToLower(v)
+	return asciiToLower(v)
 }
@ -140,25 +140,29 @@ func buildRootHuffmanNode() {
 		panic("unexpected size")
 	}
 	lazyRootHuffmanNode = newInternalNode()
-	for i, code := range huffmanCodes {
-		addDecoderNode(byte(i), code, huffmanCodeLen[i])
-	}
-}
+	// allocate a leaf node for each of the 256 symbols
+	leaves := new([256]node)

-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
-	cur := lazyRootHuffmanNode
-	for codeLen > 8 {
-		codeLen -= 8
-		i := uint8(code >> codeLen)
-		if cur.children[i] == nil {
-			cur.children[i] = newInternalNode()
+	for sym, code := range huffmanCodes {
+		codeLen := huffmanCodeLen[sym]
+
+		cur := lazyRootHuffmanNode
+		for codeLen > 8 {
+			codeLen -= 8
+			i := uint8(code >> codeLen)
+			if cur.children[i] == nil {
+				cur.children[i] = newInternalNode()
+			}
+			cur = cur.children[i]
+		}
+		shift := 8 - codeLen
+		start, end := int(uint8(code<<shift)), int(1<<shift)
+
+		leaves[sym].sym = byte(sym)
+		leaves[sym].codeLen = codeLen
+		for i := start; i < start+end; i++ {
+			cur.children[i] = &leaves[sym]
 		}
-		cur = cur.children[i]
-	}
-	shift := 8 - codeLen
-	start, end := int(uint8(code<<shift)), int(1<<shift)
-	for i := start; i < start+end; i++ {
-		cur.children[i] = &node{sym: sym, codeLen: codeLen}
 	}
 }
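Design note on the rewrite above: the old code allocated a fresh node for
every Huffman leaf via the addDecoderNode helper; the new version folds that
helper into buildRootHuffmanNode and backs all 256 leaves with a single
new([256]node) allocation, pointing table entries at &leaves[sym]. The
decoding table is unchanged; the build does one allocation instead of 256.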
@ -0,0 +1,31 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.15
// +build !go1.15

package http2

import (
	"context"
	"crypto/tls"
)

// dialTLSWithContext opens a TLS connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
	cn, err := tls.Dial(network, addr, cfg)
	if err != nil {
		return nil, err
	}
	if err := cn.Handshake(); err != nil {
		return nil, err
	}
	if cfg.InsecureSkipVerify {
		return cn, nil
	}
	if err := cn.VerifyHostname(cfg.ServerName); err != nil {
		return nil, err
	}
	return cn, nil
}
@ -30,6 +30,17 @@ type pipeBuffer interface {
 	io.Reader
 }

+// setBuffer initializes the pipe buffer.
+// It has no effect if the pipe is already closed.
+func (p *pipe) setBuffer(b pipeBuffer) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.err != nil || p.breakErr != nil {
+		return
+	}
+	p.b = b
+}
+
 func (p *pipe) Len() int {
 	p.mu.Lock()
 	defer p.mu.Unlock()
@ -130,6 +130,12 @@ type Server struct {
 	// If nil, a default scheduler is chosen.
 	NewWriteScheduler func() WriteScheduler

+	// CountError, if non-nil, is called on HTTP/2 server errors.
+	// It's intended to increment a metric for monitoring, such
+	// as an expvar or Prometheus metric.
+	// The errType consists of only ASCII word characters.
+	CountError func(errType string)
+
 	// Internal state. This is a pointer (rather than embedded directly)
 	// so that we don't embed a Mutex in this struct, which will make the
 	// struct non-copyable, which might break some callers.
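A minimal consumer sketch (variable names are illustrative, assuming this
vendored http2 package): CountError plugs straight into an expvar map, so each
error token becomes a monotonically increasing counter.

	package main

	import (
		"expvar"

		"golang.org/x/net/http2"
	)

	var frameErrors = expvar.NewMap("http2_server_errors")

	func newHTTP2Server() *http2.Server {
		return &http2.Server{
			CountError: func(errType string) {
				frameErrors.Add(errType, 1) // e.g. "frame_ping_length"
			},
		}
	}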
@ -231,13 +237,12 @@ func ConfigureServer(s *http.Server, conf *Server) error {
|
||||||
|
|
||||||
if s.TLSConfig == nil {
|
if s.TLSConfig == nil {
|
||||||
s.TLSConfig = new(tls.Config)
|
s.TLSConfig = new(tls.Config)
|
||||||
} else if s.TLSConfig.CipherSuites != nil {
|
} else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
|
||||||
// If they already provided a CipherSuite list, return
|
// If they already provided a TLS 1.0–1.2 CipherSuite list, return an
|
||||||
// an error if it has a bad order or is missing
|
// error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
|
||||||
// ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
|
// ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
|
||||||
haveRequired := false
|
haveRequired := false
|
||||||
sawBad := false
|
for _, cs := range s.TLSConfig.CipherSuites {
|
||||||
for i, cs := range s.TLSConfig.CipherSuites {
|
|
||||||
switch cs {
|
switch cs {
|
||||||
case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
// Alternative MTI cipher to not discourage ECDSA-only servers.
|
// Alternative MTI cipher to not discourage ECDSA-only servers.
|
||||||
|
@ -245,14 +250,9 @@ func ConfigureServer(s *http.Server, conf *Server) error {
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
|
||||||
haveRequired = true
|
haveRequired = true
|
||||||
}
|
}
|
||||||
if isBadCipher(cs) {
|
|
||||||
sawBad = true
|
|
||||||
} else if sawBad {
|
|
||||||
return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if !haveRequired {
|
if !haveRequired {
|
||||||
return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).")
|
return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -265,16 +265,12 @@ func ConfigureServer(s *http.Server, conf *Server) error {
|
||||||
|
|
||||||
s.TLSConfig.PreferServerCipherSuites = true
|
s.TLSConfig.PreferServerCipherSuites = true
|
||||||
|
|
||||||
haveNPN := false
|
if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
|
||||||
for _, p := range s.TLSConfig.NextProtos {
|
|
||||||
if p == NextProtoTLS {
|
|
||||||
haveNPN = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !haveNPN {
|
|
||||||
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
|
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
|
||||||
}
|
}
|
||||||
|
if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
|
||||||
|
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
|
||||||
|
}
|
||||||
|
|
||||||
if s.TLSNextProto == nil {
|
if s.TLSNextProto == nil {
|
||||||
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
|
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
|
||||||
|
@ -415,6 +411,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
||||||
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
|
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
|
||||||
|
|
||||||
fr := NewFramer(sc.bw, c)
|
fr := NewFramer(sc.bw, c)
|
||||||
|
if s.CountError != nil {
|
||||||
|
fr.countError = s.CountError
|
||||||
|
}
|
||||||
fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
|
fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
|
||||||
fr.MaxHeaderListSize = sc.maxHeaderListSize()
|
fr.MaxHeaderListSize = sc.maxHeaderListSize()
|
||||||
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
|
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
|
||||||
|
@ -720,7 +719,15 @@ func (sc *serverConn) canonicalHeader(v string) string {
|
||||||
sc.canonHeader = make(map[string]string)
|
sc.canonHeader = make(map[string]string)
|
||||||
}
|
}
|
||||||
cv = http.CanonicalHeaderKey(v)
|
cv = http.CanonicalHeaderKey(v)
|
||||||
sc.canonHeader[v] = cv
|
// maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
|
||||||
|
// entries in the canonHeader cache. This should be larger than the number
|
||||||
|
// of unique, uncommon header keys likely to be sent by the peer, while not
|
||||||
|
// so high as to permit unreasonable memory usage if the peer sends an unbounded
|
||||||
|
// number of unique header keys.
|
||||||
|
const maxCachedCanonicalHeaders = 32
|
||||||
|
if len(sc.canonHeader) < maxCachedCanonicalHeaders {
|
||||||
|
sc.canonHeader[v] = cv
|
||||||
|
}
|
||||||
return cv
|
return cv
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -826,7 +833,7 @@ func (sc *serverConn) serve() {
 	})
 	sc.unackedSettings++
 
-	// Each connection starts with intialWindowSize inflow tokens.
+	// Each connection starts with initialWindowSize inflow tokens.
 	// If a higher value is configured, we add more tokens.
 	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
 		sc.sendWindowUpdate(nil, int(diff))
@@ -866,6 +873,15 @@ func (sc *serverConn) serve() {
 		case res := <-sc.wroteFrameCh:
 			sc.wroteFrame(res)
 		case res := <-sc.readFrameCh:
+			// Process any written frames before reading new frames from the client since a
+			// written frame could have triggered a new stream to be started.
+			if sc.writingFrameAsync {
+				select {
+				case wroteRes := <-sc.wroteFrameCh:
+					sc.wroteFrame(wroteRes)
+				default:
+				}
+			}
 			if !sc.processFrameFromReader(res) {
 				return
 			}
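The serve-loop change above drains any pending write completion before handling a newly read frame, using a select with a default case so the drain never blocks. A small self-contained sketch of that non-blocking drain idiom (the channel and its contents are illustrative):

package main

import "fmt"

func main() {
	wrote := make(chan string, 1)
	wrote <- "settings frame written"

	// Non-blocking drain: handle a pending completion if one exists,
	// otherwise fall through immediately via the default case.
	select {
	case res := <-wrote:
		fmt.Println("processed first:", res)
	default:
		fmt.Println("nothing pending")
	}
}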
@@ -1400,7 +1416,7 @@ func (sc *serverConn) processFrame(f Frame) error {
 	// First frame received must be SETTINGS.
 	if !sc.sawFirstSettings {
 		if _, ok := f.(*SettingsFrame); !ok {
-			return ConnectionError(ErrCodeProtocol)
+			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
 		}
 		sc.sawFirstSettings = true
 	}
@@ -1425,7 +1441,7 @@ func (sc *serverConn) processFrame(f Frame) error {
 	case *PushPromiseFrame:
 		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
 		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
 	default:
 		sc.vlogf("http2: server ignoring frame: %v", f.Header())
 		return nil
@@ -1445,7 +1461,7 @@ func (sc *serverConn) processPing(f *PingFrame) error {
 		// identifier field value other than 0x0, the recipient MUST
 		// respond with a connection error (Section 5.4.1) of type
 		// PROTOCOL_ERROR."
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
 	}
 	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
 		return nil
@@ -1464,7 +1480,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
 			// or PRIORITY on a stream in this state MUST be
 			// treated as a connection error (Section 5.4.1) of
 			// type PROTOCOL_ERROR."
-			return ConnectionError(ErrCodeProtocol)
+			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
 		}
 		if st == nil {
 			// "WINDOW_UPDATE can be sent by a peer that has sent a
@@ -1475,7 +1491,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
 			return nil
 		}
 		if !st.flow.add(int32(f.Increment)) {
-			return streamError(f.StreamID, ErrCodeFlowControl)
+			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
 		}
 	default: // connection-level flow control
 		if !sc.flow.add(int32(f.Increment)) {
@@ -1496,7 +1512,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
 		// identifying an idle stream is received, the
 		// recipient MUST treat this as a connection error
 		// (Section 5.4.1) of type PROTOCOL_ERROR.
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
 	}
 	if st != nil {
 		st.cancelCtx()
@@ -1548,7 +1564,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 			// Why is the peer ACKing settings we never sent?
 			// The spec doesn't mention this case, but
 			// hang up on them anyway.
-			return ConnectionError(ErrCodeProtocol)
+			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
 		}
 		return nil
 	}
@@ -1556,7 +1572,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 		// This isn't actually in the spec, but hang up on
 		// suspiciously large settings frames or those with
 		// duplicate entries.
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
 	}
 	if err := f.ForeachSetting(sc.processSetting); err != nil {
 		return err
@@ -1623,7 +1639,7 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
 			// control window to exceed the maximum size as a
 			// connection error (Section 5.4.1) of type
 			// FLOW_CONTROL_ERROR."
-			return ConnectionError(ErrCodeFlowControl)
+			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
 		}
 	}
 	return nil
@@ -1656,7 +1672,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// or PRIORITY on a stream in this state MUST be
 		// treated as a connection error (Section 5.4.1) of
 		// type PROTOCOL_ERROR."
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
 	}
 
 	// "If a DATA frame is received whose stream is not in "open"
@@ -1673,7 +1689,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// and return any flow control bytes since we're not going
 		// to consume them.
 		if sc.inflow.available() < int32(f.Length) {
-			return streamError(id, ErrCodeFlowControl)
+			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
 		// Deduct the flow control from inflow, since we're
 		// going to immediately add it back in
@@ -1686,7 +1702,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 			// Already have a stream error in flight. Don't send another.
 			return nil
 		}
-		return streamError(id, ErrCodeStreamClosed)
+		return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
 	}
 	if st.body == nil {
 		panic("internal error: should have a body in this state")
@@ -1698,12 +1714,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
 		// value of a content-length header field does not equal the sum of the
 		// DATA frame payload lengths that form the body.
-		return streamError(id, ErrCodeProtocol)
+		return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
 	}
 	if f.Length > 0 {
 		// Check whether the client has flow control quota.
 		if st.inflow.available() < int32(f.Length) {
-			return streamError(id, ErrCodeFlowControl)
+			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
 		}
 		st.inflow.take(int32(f.Length))
 
@@ -1711,7 +1727,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		wrote, err := st.body.Write(data)
 		if err != nil {
 			sc.sendWindowUpdate(nil, int(f.Length)-wrote)
-			return streamError(id, ErrCodeStreamClosed)
+			return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
 		}
 		if wrote != len(data) {
 			panic("internal error: bad Writer")
@@ -1797,7 +1813,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// stream identifier MUST respond with a connection error
 	// (Section 5.4.1) of type PROTOCOL_ERROR.
 	if id%2 != 1 {
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
 	}
 	// A HEADERS frame can be used to create a new stream or
 	// send a trailer for an open one. If we already have a stream
@@ -1814,7 +1830,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 		// this state, it MUST respond with a stream error (Section 5.4.2) of
 		// type STREAM_CLOSED.
 		if st.state == stateHalfClosedRemote {
-			return streamError(id, ErrCodeStreamClosed)
+			return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
 		}
 		return st.processTrailerHeaders(f)
 	}
@@ -1825,7 +1841,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// receives an unexpected stream identifier MUST respond with
 	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
 	if id <= sc.maxClientStreamID {
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
 	}
 	sc.maxClientStreamID = id
 
@@ -1842,14 +1858,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	if sc.curClientStreams+1 > sc.advMaxStreams {
 		if sc.unackedSettings == 0 {
 			// They should know better.
-			return streamError(id, ErrCodeProtocol)
+			return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
 		}
 		// Assume it's a network race, where they just haven't
 		// received our last SETTINGS update. But actually
 		// this can't happen yet, because we don't yet provide
 		// a way for users to adjust server parameters at
 		// runtime.
-		return streamError(id, ErrCodeRefusedStream)
+		return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
 	}
 
 	initialState := stateOpen
@@ -1859,7 +1875,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	st := sc.newStream(id, 0, initialState)
 
 	if f.HasPriority() {
-		if err := checkPriority(f.StreamID, f.Priority); err != nil {
+		if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
 			return err
 		}
 		sc.writeSched.AdjustStream(st.id, f.Priority)
@@ -1903,15 +1919,15 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
 	sc := st.sc
 	sc.serveG.check()
 	if st.gotTrailerHeader {
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
 	}
 	st.gotTrailerHeader = true
 	if !f.StreamEnded() {
-		return streamError(st.id, ErrCodeProtocol)
+		return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
 	}
 
 	if len(f.PseudoFields()) > 0 {
-		return streamError(st.id, ErrCodeProtocol)
+		return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
 	}
 	if st.trailer != nil {
 		for _, hf := range f.RegularFields() {
@@ -1920,7 +1936,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
 				// TODO: send more details to the peer somehow. But http2 has
 				// no way to send debug data at a stream level. Discuss with
 				// HTTP folk.
-				return streamError(st.id, ErrCodeProtocol)
+				return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
 			}
 			st.trailer[key] = append(st.trailer[key], hf.Value)
 		}
@@ -1929,13 +1945,13 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
 	return nil
 }
 
-func checkPriority(streamID uint32, p PriorityParam) error {
+func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
 	if streamID == p.StreamDep {
 		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
 		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
 		// Section 5.3.3 says that a stream can depend on one of its dependencies,
 		// so it's only self-dependencies that are forbidden.
-		return streamError(streamID, ErrCodeProtocol)
+		return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
 	}
 	return nil
 }
@@ -1944,7 +1960,7 @@ func (sc *serverConn) processPriority(f *PriorityFrame) error {
 	if sc.inGoAway {
 		return nil
 	}
-	if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
 		return err
 	}
 	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
@@ -2001,7 +2017,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 	isConnect := rp.method == "CONNECT"
 	if isConnect {
 		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
-			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
 		}
 	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
 		// See 8.1.2.6 Malformed Requests and Responses:
@@ -2014,13 +2030,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 		// "All HTTP/2 requests MUST include exactly one valid
 		// value for the :method, :scheme, and :path
 		// pseudo-header fields"
-		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
 	bodyOpen := !f.StreamEnded()
 	if rp.method == "HEAD" && bodyOpen {
 		// HEAD requests can't have bodies
-		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+		return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
 	rp.header = make(http.Header)
@@ -2103,7 +2119,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
 		var err error
 		url_, err = url.ParseRequestURI(rp.path)
 		if err != nil {
-			return nil, nil, streamError(st.id, ErrCodeProtocol)
+			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
 		}
 		requestURI = rp.path
 	}
@@ -2789,8 +2805,12 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
 		// but PUSH_PROMISE requests cannot have a body.
 		// http://tools.ietf.org/html/rfc7540#section-8.2
 		// Also disallow Host, since the promised URL must be absolute.
-		switch strings.ToLower(k) {
-		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+		if asciiEqualFold(k, "content-length") ||
+			asciiEqualFold(k, "content-encoding") ||
+			asciiEqualFold(k, "trailer") ||
+			asciiEqualFold(k, "te") ||
+			asciiEqualFold(k, "expect") ||
+			asciiEqualFold(k, "host") {
 			return fmt.Errorf("promised request headers cannot include %q", k)
 		}
 	}
@@ -2982,3 +3002,31 @@ func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
 	}
 	return false
 }
+
+func (sc *serverConn) countError(name string, err error) error {
+	if sc == nil || sc.srv == nil {
+		return err
+	}
+	f := sc.srv.CountError
+	if f == nil {
+		return err
+	}
+	var typ string
+	var code ErrCode
+	switch e := err.(type) {
+	case ConnectionError:
+		typ = "conn"
+		code = ErrCode(e)
+	case StreamError:
+		typ = "stream"
+		code = ErrCode(e.Code)
+	default:
+		return err
+	}
+	codeStr := errCodeName[code]
+	if codeStr == "" {
+		codeStr = strconv.Itoa(int(code))
+	}
+	f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
+	return err
+}
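countError is the aggregation point for every call site rewritten above: it classifies the error as conn- or stream-level, resolves the code name, and reports a composite string such as stream_PROTOCOL_ERROR_trailers_pseudo. A hedged sketch of how a server might consume the hook via the http2.Server.CountError field this vendored version introduces (the counter map and TLS file paths are illustrative placeholders):

package main

import (
	"log"
	"net/http"
	"sync"

	"golang.org/x/net/http2"
)

func main() {
	var (
		mu     sync.Mutex
		counts = make(map[string]int) // e.g. "stream_PROTOCOL_ERROR_trailers_pseudo" -> n
	)
	h2 := &http2.Server{
		// CountError is invoked once per counted protocol error.
		CountError: func(errType string) {
			mu.Lock()
			counts[errType]++
			mu.Unlock()
		},
	}
	srv := &http.Server{Addr: ":8443", Handler: http.NotFoundHandler()}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
}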
File diff suppressed because it is too large
@@ -341,7 +341,12 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
 	}
 	for _, k := range keys {
 		vv := h[k]
-		k = lowerHeader(k)
+		k, ascii := lowerHeader(k)
+		if !ascii {
+			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+			// field names have to be ASCII characters (just as in HTTP/1.x).
+			continue
+		}
 		if !validWireHeaderFieldName(k) {
 			// Skip it as backup paranoia. Per
 			// golang.org/issue/14048, these should
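The encodeHeaders change stops round-tripping non-ASCII header names: lowerHeader now reports whether the key was pure ASCII, and non-ASCII keys are dropped per RFC 7540, Section 8.1.2. A sketch of an equivalent check (asciiLower is a hypothetical stand-in for the vendored lowerHeader):

package main

import "fmt"

// asciiLower lowercases s and reports whether it is pure ASCII.
// Non-ASCII names must not be written to the HTTP/2 wire.
func asciiLower(s string) (lower string, ascii bool) {
	b := []byte(s)
	for i, c := range b {
		if c >= 0x80 {
			return "", false
		}
		if 'A' <= c && c <= 'Z' {
			b[i] = c + ('a' - 'A')
		}
	}
	return string(b), true
}

func main() {
	for _, k := range []string{"Content-Type", "Héader"} {
		if l, ok := asciiLower(k); ok {
			fmt.Println("write:", l)
		} else {
			fmt.Println("skip non-ASCII key:", k)
		}
	}
}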
@@ -32,7 +32,8 @@ type WriteScheduler interface {
 
 	// Pop dequeues the next frame to write. Returns false if no frames can
 	// be written. Frames with a given wr.StreamID() are Pop'd in the same
-	// order they are Push'd. No frames should be discarded except by CloseStream.
+	// order they are Push'd, except RST_STREAM frames. No frames should be
+	// discarded except by CloseStream.
 	Pop() (wr FrameWriteRequest, ok bool)
 }
@@ -52,6 +53,7 @@ type FrameWriteRequest struct {
 
 	// stream is the stream on which this frame will be written.
 	// nil for non-stream frames like PING and SETTINGS.
+	// nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
 	stream *stream
 
 	// done, if non-nil, must be a buffered channel with space for
@@ -45,11 +45,11 @@ func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityP
 }
 
 func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
-	id := wr.StreamID()
-	if id == 0 {
+	if wr.isControl() {
 		ws.zero.push(wr)
 		return
 	}
+	id := wr.StreamID()
 	q, ok := ws.sq[id]
 	if !ok {
 		q = ws.queuePool.get()
@@ -59,7 +59,7 @@ func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
 }
 
 func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
-	// Control frames first.
+	// Control and RST_STREAM frames first.
 	if !ws.zero.empty() {
 		return ws.zero.shift(), true
 	}
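The scheduler change above widens the fast-path queue from "stream ID zero" to "any control frame", so RST_STREAM frames (which carry a nonzero stream ID) jump ahead of queued data. A toy sketch of the same two-queue shape (the types and the control predicate are illustrative, not the vendored scheduler):

package main

import "fmt"

type frame struct {
	name    string
	control bool // e.g. SETTINGS, PING, RST_STREAM
}

type scheduler struct {
	control []frame // popped first, in FIFO order
	data    []frame
}

func (s *scheduler) push(f frame) {
	if f.control {
		s.control = append(s.control, f)
		return
	}
	s.data = append(s.data, f)
}

func (s *scheduler) pop() (frame, bool) {
	if len(s.control) > 0 {
		f := s.control[0]
		s.control = s.control[1:]
		return f, true
	}
	if len(s.data) > 0 {
		f := s.data[0]
		s.data = s.data[1:]
		return f, true
	}
	return frame{}, false
}

func main() {
	var s scheduler
	s.push(frame{name: "DATA"})
	s.push(frame{name: "RST_STREAM", control: true})
	for f, ok := s.pop(); ok; f, ok = s.pop() {
		fmt.Println(f.name) // RST_STREAM first, then DATA
	}
}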
@@ -0,0 +1,14 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package idna
+
+// Transitional processing is disabled by default in Go 1.18.
+// https://golang.org/issue/47510
+const transitionalLookup = false
@@ -59,23 +59,22 @@ type Option func(*options)
 // Transitional sets a Profile to use the Transitional mapping as defined in UTS
 // #46. This will cause, for example, "ß" to be mapped to "ss". Using the
 // transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
 // option is only meaningful if combined with MapForLookup.
 func Transitional(transitional bool) Option {
-	return func(o *options) { o.transitional = true }
+	return func(o *options) { o.transitional = transitional }
 }
 
 // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
 // are longer than allowed by the RFC.
+//
+// This option corresponds to the VerifyDnsLength flag in UTS #46.
 func VerifyDNSLength(verify bool) Option {
 	return func(o *options) { o.verifyDNSLength = verify }
 }
 
 // RemoveLeadingDots removes leading label separators. Leading runes that map to
 // dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
-//
-// This is the behavior suggested by the UTS #46 and is adopted by some
-// browsers.
 func RemoveLeadingDots(remove bool) Option {
 	return func(o *options) { o.removeLeadingDots = remove }
 }
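The Transitional fix above is worth pausing on: the closure assigned a hard-coded true instead of the transitional parameter, so Transitional(false) still enabled transitional mapping. A minimal reproduction of that bug class with a toy options type (all names here are illustrative):

package main

import "fmt"

type options struct{ transitional bool }

type Option func(*options)

// buggy ignores its argument, as the pre-fix Transitional did.
func buggy(transitional bool) Option {
	return func(o *options) { o.transitional = true }
}

// fixed propagates the caller's choice.
func fixed(transitional bool) Option {
	return func(o *options) { o.transitional = transitional }
}

func apply(opts ...Option) options {
	var o options
	for _, opt := range opts {
		opt(&o)
	}
	return o
}

func main() {
	fmt.Println(apply(buggy(false)).transitional) // true: the bug
	fmt.Println(apply(fixed(false)).transitional) // false: as requested
}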
@@ -83,6 +82,8 @@ func RemoveLeadingDots(remove bool) Option {
 // ValidateLabels sets whether to check the mandatory label validation criteria
 // as defined in Section 5.4 of RFC 5891. This includes testing for correct use
 // of hyphens ('-'), normalization, validity of runes, and the context rules.
+// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
+// in UTS #46.
 func ValidateLabels(enable bool) Option {
 	return func(o *options) {
 		// Don't override existing mappings, but set one that at least checks
@@ -91,25 +92,48 @@ func ValidateLabels(enable bool) Option {
 			o.mapping = normalize
 		}
 		o.trie = trie
-		o.validateLabels = enable
-		o.fromPuny = validateFromPunycode
+		o.checkJoiners = enable
+		o.checkHyphens = enable
+		if enable {
+			o.fromPuny = validateFromPunycode
+		} else {
+			o.fromPuny = nil
+		}
 	}
 }
 
+// CheckHyphens sets whether to check for correct use of hyphens ('-') in
+// labels. Most web browsers do not have this option set, since labels such as
+// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
+//
+// This option corresponds to the CheckHyphens flag in UTS #46.
+func CheckHyphens(enable bool) Option {
+	return func(o *options) { o.checkHyphens = enable }
+}
+
+// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
+// A of RFC 5892, concerning the use of joiner runes.
+//
+// This option corresponds to the CheckJoiners flag in UTS #46.
+func CheckJoiners(enable bool) Option {
+	return func(o *options) {
+		o.trie = trie
+		o.checkJoiners = enable
+	}
+}
+
 // StrictDomainName limits the set of permissible ASCII characters to those
 // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
-// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
+// but is only useful if ValidateLabels is set.
 //
 // This option is useful, for instance, for browsers that allow characters
 // outside this range, for example a '_' (U+005F LOW LINE). See
-// http://www.rfc-editor.org/std/std3.txt for more details This option
-// corresponds to the UseSTD3ASCIIRules option in UTS #46.
+// http://www.rfc-editor.org/std/std3.txt for more details.
+//
+// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
 func StrictDomainName(use bool) Option {
-	return func(o *options) {
-		o.trie = trie
-		o.useSTD3Rules = use
-		o.fromPuny = validateFromPunycode
-	}
+	return func(o *options) { o.useSTD3Rules = use }
 }
 
 // NOTE: the following options pull in tables. The tables should not be linked
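With hyphen and joiner checks split out, callers can opt into them independently instead of taking the all-or-nothing ValidateLabels. A hedged usage sketch against the vendored golang.org/x/net/idna API (this assumes New, MapForLookup, and the new CheckHyphens/CheckJoiners options are exported in this version, as the mirrored code suggests; the domains are examples):

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// A lookup profile that checks hyphen placement but skips ContextJ rules.
	p := idna.New(
		idna.MapForLookup(),
		idna.CheckHyphens(true),
		idna.CheckJoiners(false),
	)
	a, err := idna.Lookup.ToASCII("bücher.example")
	fmt.Println(a, err) // expected: xn--bcher-kva.example <nil>
	b, err := p.ToASCII("ab--cd.example")
	fmt.Println(b, err) // should be rejected by the hyphen rule (positions 3 and 4)
}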
@@ -117,6 +141,8 @@ func StrictDomainName(use bool) Option {
 
 // BidiRule enables the Bidi rule as defined in RFC 5893. Any application
 // that relies on proper validation of labels should include this rule.
+//
+// This option corresponds to the CheckBidi flag in UTS #46.
 func BidiRule() Option {
 	return func(o *options) { o.bidirule = bidirule.ValidString }
 }
@@ -152,7 +178,8 @@ func MapForLookup() Option {
 type options struct {
 	transitional      bool
 	useSTD3Rules      bool
-	validateLabels    bool
+	checkHyphens      bool
+	checkJoiners      bool
 	verifyDNSLength   bool
 	removeLeadingDots bool
 
@@ -225,8 +252,11 @@ func (p *Profile) String() string {
 	if p.useSTD3Rules {
 		s += ":UseSTD3Rules"
 	}
-	if p.validateLabels {
-		s += ":ValidateLabels"
+	if p.checkHyphens {
+		s += ":CheckHyphens"
+	}
+	if p.checkJoiners {
+		s += ":CheckJoiners"
 	}
 	if p.verifyDNSLength {
 		s += ":VerifyDNSLength"
@@ -254,26 +284,29 @@ var (
 
 	punycode = &Profile{}
 	lookup   = &Profile{options{
-		transitional:   true,
-		useSTD3Rules:   true,
-		validateLabels: true,
-		trie:           trie,
-		fromPuny:       validateFromPunycode,
-		mapping:        validateAndMap,
-		bidirule:       bidirule.ValidString,
+		transitional: transitionalLookup,
+		useSTD3Rules: true,
+		checkHyphens: true,
+		checkJoiners: true,
+		trie:         trie,
+		fromPuny:     validateFromPunycode,
+		mapping:      validateAndMap,
+		bidirule:     bidirule.ValidString,
 	}}
 	display = &Profile{options{
-		useSTD3Rules:   true,
-		validateLabels: true,
-		trie:           trie,
-		fromPuny:       validateFromPunycode,
-		mapping:        validateAndMap,
-		bidirule:       bidirule.ValidString,
+		useSTD3Rules: true,
+		checkHyphens: true,
+		checkJoiners: true,
+		trie:         trie,
+		fromPuny:     validateFromPunycode,
+		mapping:      validateAndMap,
+		bidirule:     bidirule.ValidString,
 	}}
 	registration = &Profile{options{
 		useSTD3Rules:    true,
-		validateLabels:  true,
 		verifyDNSLength: true,
+		checkHyphens:    true,
+		checkJoiners:    true,
 		trie:            trie,
 		fromPuny:        validateFromPunycode,
 		mapping:         validateRegistration,
@@ -340,7 +373,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) {
 		}
 		isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
 		labels.set(u)
-		if err == nil && p.validateLabels {
+		if err == nil && p.fromPuny != nil {
 			err = p.fromPuny(p, u)
 		}
 		if err == nil {
@@ -681,16 +714,18 @@ func (p *Profile) validateLabel(s string) (err error) {
 		}
 		return nil
 	}
-	if !p.validateLabels {
+	if p.checkHyphens {
+		if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+			return &labelError{s, "V2"}
+		}
+		if s[0] == '-' || s[len(s)-1] == '-' {
+			return &labelError{s, "V3"}
+		}
+	}
+	if !p.checkJoiners {
 		return nil
 	}
-	trie := p.trie // p.validateLabels is only set if trie is set.
-	if len(s) > 4 && s[2] == '-' && s[3] == '-' {
-		return &labelError{s, "V2"}
-	}
-	if s[0] == '-' || s[len(s)-1] == '-' {
-		return &labelError{s, "V3"}
-	}
+	trie := p.trie // p.checkJoiners is only set if trie is set.
 	// TODO: merge the use of this in the trie.
 	v, sz := trie.lookupString(s)
 	x := info(v)
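The relocated hyphen checks enforce two UTS #46 rules: no hyphen in both the third and fourth positions (reserved for ACE prefixes like "xn--"), and no leading or trailing hyphen. The same logic as a standalone predicate with a few probe labels (a sketch, not the vendored validator):

package main

import "fmt"

// checkHyphens reports an error string for labels that violate the
// UTS #46 CheckHyphens rules, or "" if the label passes.
func checkHyphens(s string) string {
	if s == "" {
		return ""
	}
	if len(s) > 4 && s[2] == '-' && s[3] == '-' {
		return "V2: hyphens in positions 3 and 4"
	}
	if s[0] == '-' || s[len(s)-1] == '-' {
		return "V3: leading or trailing hyphen"
	}
	return ""
}

func main() {
	for _, label := range []string{"example", "ab--cd", "-leading", "trailing-"} {
		if msg := checkHyphens(label); msg != "" {
			fmt.Printf("%q rejected: %s\n", label, msg)
		} else {
			fmt.Printf("%q ok\n", label)
		}
	}
}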
@@ -58,23 +58,22 @@ type Option func(*options)
 // Transitional sets a Profile to use the Transitional mapping as defined in UTS
 // #46. This will cause, for example, "ß" to be mapped to "ss". Using the
 // transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
 // option is only meaningful if combined with MapForLookup.
 func Transitional(transitional bool) Option {
-	return func(o *options) { o.transitional = true }
+	return func(o *options) { o.transitional = transitional }
 }
 
 // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
 // are longer than allowed by the RFC.
+//
+// This option corresponds to the VerifyDnsLength flag in UTS #46.
 func VerifyDNSLength(verify bool) Option {
 	return func(o *options) { o.verifyDNSLength = verify }
 }
 
 // RemoveLeadingDots removes leading label separators. Leading runes that map to
 // dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
-//
-// This is the behavior suggested by the UTS #46 and is adopted by some
-// browsers.
 func RemoveLeadingDots(remove bool) Option {
 	return func(o *options) { o.removeLeadingDots = remove }
 }
@@ -82,6 +81,8 @@ func RemoveLeadingDots(remove bool) Option {
 // ValidateLabels sets whether to check the mandatory label validation criteria
 // as defined in Section 5.4 of RFC 5891. This includes testing for correct use
 // of hyphens ('-'), normalization, validity of runes, and the context rules.
+// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
+// in UTS #46.
 func ValidateLabels(enable bool) Option {
 	return func(o *options) {
 		// Don't override existing mappings, but set one that at least checks
@@ -90,25 +91,48 @@ func ValidateLabels(enable bool) Option {
 			o.mapping = normalize
 		}
 		o.trie = trie
-		o.validateLabels = enable
-		o.fromPuny = validateFromPunycode
+		o.checkJoiners = enable
+		o.checkHyphens = enable
+		if enable {
+			o.fromPuny = validateFromPunycode
+		} else {
+			o.fromPuny = nil
+		}
 	}
 }
 
+// CheckHyphens sets whether to check for correct use of hyphens ('-') in
+// labels. Most web browsers do not have this option set, since labels such as
+// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
+//
+// This option corresponds to the CheckHyphens flag in UTS #46.
+func CheckHyphens(enable bool) Option {
+	return func(o *options) { o.checkHyphens = enable }
+}
+
+// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
+// A of RFC 5892, concerning the use of joiner runes.
+//
+// This option corresponds to the CheckJoiners flag in UTS #46.
+func CheckJoiners(enable bool) Option {
+	return func(o *options) {
+		o.trie = trie
+		o.checkJoiners = enable
+	}
+}
+
 // StrictDomainName limits the set of permissable ASCII characters to those
 // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
-// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
+// but is only useful if ValidateLabels is set.
 //
 // This option is useful, for instance, for browsers that allow characters
 // outside this range, for example a '_' (U+005F LOW LINE). See
-// http://www.rfc-editor.org/std/std3.txt for more details This option
-// corresponds to the UseSTD3ASCIIRules option in UTS #46.
+// http://www.rfc-editor.org/std/std3.txt for more details.
+//
+// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
 func StrictDomainName(use bool) Option {
-	return func(o *options) {
-		o.trie = trie
-		o.useSTD3Rules = use
-		o.fromPuny = validateFromPunycode
-	}
+	return func(o *options) { o.useSTD3Rules = use }
 }
 
 // NOTE: the following options pull in tables. The tables should not be linked
@@ -116,6 +140,8 @@ func StrictDomainName(use bool) Option {
 
 // BidiRule enables the Bidi rule as defined in RFC 5893. Any application
 // that relies on proper validation of labels should include this rule.
+//
+// This option corresponds to the CheckBidi flag in UTS #46.
 func BidiRule() Option {
	return func(o *options) { o.bidirule = bidirule.ValidString }
 }
@@ -152,7 +178,8 @@ func MapForLookup() Option {
 type options struct {
 	transitional      bool
 	useSTD3Rules      bool
-	validateLabels    bool
+	checkHyphens      bool
+	checkJoiners      bool
 	verifyDNSLength   bool
 	removeLeadingDots bool
 
@@ -225,8 +252,11 @@ func (p *Profile) String() string {
 	if p.useSTD3Rules {
 		s += ":UseSTD3Rules"
 	}
-	if p.validateLabels {
-		s += ":ValidateLabels"
+	if p.checkHyphens {
+		s += ":CheckHyphens"
+	}
+	if p.checkJoiners {
+		s += ":CheckJoiners"
 	}
 	if p.verifyDNSLength {
 		s += ":VerifyDNSLength"
@@ -255,9 +285,10 @@ var (
 	punycode = &Profile{}
 	lookup   = &Profile{options{
 		transitional:      true,
-		useSTD3Rules:      true,
-		validateLabels:    true,
 		removeLeadingDots: true,
+		useSTD3Rules:      true,
+		checkHyphens:      true,
+		checkJoiners:      true,
 		trie:              trie,
 		fromPuny:          validateFromPunycode,
 		mapping:           validateAndMap,
@@ -265,8 +296,9 @@ var (
 	}}
 	display = &Profile{options{
 		useSTD3Rules:      true,
-		validateLabels:    true,
 		removeLeadingDots: true,
+		checkHyphens:      true,
+		checkJoiners:      true,
 		trie:              trie,
 		fromPuny:          validateFromPunycode,
 		mapping:           validateAndMap,
@@ -274,8 +306,9 @@ var (
 	}}
 	registration = &Profile{options{
 		useSTD3Rules:    true,
-		validateLabels:  true,
 		verifyDNSLength: true,
+		checkHyphens:    true,
+		checkJoiners:    true,
 		trie:            trie,
 		fromPuny:        validateFromPunycode,
 		mapping:         validateRegistration,
@@ -339,7 +372,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) {
 			continue
 		}
 		labels.set(u)
-		if err == nil && p.validateLabels {
+		if err == nil && p.fromPuny != nil {
 			err = p.fromPuny(p, u)
 		}
 		if err == nil {
@@ -629,16 +662,18 @@ func (p *Profile) validateLabel(s string) error {
 	if p.bidirule != nil && !p.bidirule(s) {
 		return &labelError{s, "B"}
 	}
-	if !p.validateLabels {
+	if p.checkHyphens {
+		if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+			return &labelError{s, "V2"}
+		}
+		if s[0] == '-' || s[len(s)-1] == '-' {
+			return &labelError{s, "V3"}
+		}
+	}
+	if !p.checkJoiners {
 		return nil
 	}
-	trie := p.trie // p.validateLabels is only set if trie is set.
-	if len(s) > 4 && s[2] == '-' && s[3] == '-' {
-		return &labelError{s, "V2"}
-	}
-	if s[0] == '-' || s[len(s)-1] == '-' {
-		return &labelError{s, "V3"}
-	}
+	trie := p.trie // p.checkJoiners is only set if trie is set.
 	// TODO: merge the use of this in the trie.
 	v, sz := trie.lookupString(s)
 	x := info(v)
@@ -0,0 +1,12 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package idna
+
+const transitionalLookup = true
@@ -49,6 +49,7 @@ func decode(encoded string) (string, error) {
 		}
 	}
 	i, n, bias := int32(0), initialN, initialBias
+	overflow := false
 	for pos < len(encoded) {
 		oldI, w := i, int32(1)
 		for k := base; ; k += base {
@@ -60,29 +61,32 @@ func decode(encoded string) (string, error) {
 				return "", punyError(encoded)
 			}
 			pos++
-			i += digit * w
-			if i < 0 {
+			i, overflow = madd(i, digit, w)
+			if overflow {
 				return "", punyError(encoded)
 			}
 			t := k - bias
-			if t < tmin {
+			if k <= bias {
 				t = tmin
-			} else if t > tmax {
+			} else if k >= bias+tmax {
 				t = tmax
 			}
 			if digit < t {
 				break
 			}
-			w *= base - t
-			if w >= math.MaxInt32/base {
+			w, overflow = madd(0, w, base-t)
+			if overflow {
 				return "", punyError(encoded)
 			}
 		}
+		if len(output) >= 1024 {
+			return "", punyError(encoded)
+		}
 		x := int32(len(output) + 1)
 		bias = adapt(i-oldI, x, oldI == 0)
 		n += i / x
 		i %= x
-		if n > utf8.MaxRune || len(output) >= 1024 {
+		if n < 0 || n > utf8.MaxRune {
 			return "", punyError(encoded)
 		}
 		output = append(output, 0)
@@ -115,6 +119,7 @@ func encode(prefix, s string) (string, error) {
 	if b > 0 {
 		output = append(output, '-')
 	}
+	overflow := false
 	for remaining != 0 {
 		m := int32(0x7fffffff)
 		for _, r := range s {
@@ -122,8 +127,8 @@ func encode(prefix, s string) (string, error) {
 				m = r
 			}
 		}
-		delta += (m - n) * (h + 1)
-		if delta < 0 {
+		delta, overflow = madd(delta, m-n, h+1)
+		if overflow {
 			return "", punyError(s)
 		}
 		n = m
@@ -141,9 +146,9 @@ func encode(prefix, s string) (string, error) {
 		q := delta
 		for k := base; ; k += base {
 			t := k - bias
-			if t < tmin {
+			if k <= bias {
 				t = tmin
-			} else if t > tmax {
+			} else if k >= bias+tmax {
 				t = tmax
 			}
 			if q < t {
@@ -164,6 +169,15 @@ func encode(prefix, s string) (string, error) {
 	return string(output), nil
 }
 
+// madd computes a + (b * c), detecting overflow.
+func madd(a, b, c int32) (next int32, overflow bool) {
+	p := int64(b) * int64(c)
+	if p > math.MaxInt32-int64(a) {
+		return 0, true
+	}
+	return a + int32(p), false
+}
+
 func decodeDigit(x byte) (digit int32, ok bool) {
 	switch {
 	case '0' <= x && x <= '9':
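madd replaces the old post-hoc sign checks (i < 0, delta < 0) with an explicit widening to int64, which catches every int32 overflow rather than only those that happen to produce a negative result. A standalone copy of the idea with a quick check (assumes non-negative inputs, as in the punycode loops):

package main

import (
	"fmt"
	"math"
)

// madd computes a + b*c in int32, widening through int64 to detect overflow.
func madd(a, b, c int32) (next int32, overflow bool) {
	p := int64(b) * int64(c)
	if p > math.MaxInt32-int64(a) {
		return 0, true
	}
	return a + int32(p), false
}

func main() {
	if v, ov := madd(1, 2, 3); !ov {
		fmt.Println(v) // 7
	}
	if _, ov := madd(math.MaxInt32, 1, 1); ov {
		fmt.Println("overflow detected")
	}
}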
@@ -1,5 +1,7 @@
 # Go terminal/console support
 
+[![Go Reference](https://pkg.go.dev/badge/golang.org/x/term.svg)](https://pkg.go.dev/golang.org/x/term)
+
 This repository provides Go terminal and console support packages.
 
 ## Download/Install
@@ -1,5 +1,5 @@
 module golang.org/x/term
 
-go 1.11
+go 1.17
 
-require golang.org/x/sys v0.0.0-20201119102817-f84b799fce68
+require golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1
@@ -1,2 +1,2 @@
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -7,11 +7,11 @@
 //
 // Putting a terminal into raw mode is the most common requirement:
 //
-//	oldState, err := terminal.MakeRaw(0)
+//	oldState, err := term.MakeRaw(int(os.Stdin.Fd()))
 //	if err != nil {
 //		panic(err)
 //	}
-//	defer terminal.Restore(0, oldState)
+//	defer term.Restore(int(os.Stdin.Fd()), oldState)
 package term
 
 // State contains the state of a terminal.
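The doc fix above also modernizes the example: the package is term, not terminal, and the fd should come from os.Stdin rather than a literal 0. Expanded into a runnable sketch against the golang.org/x/term API (error handling kept minimal):

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	// Restore cooked mode on exit; raw mode disables echo and line editing.
	defer term.Restore(fd, oldState)

	w, h, err := term.GetSize(fd)
	if err == nil {
		fmt.Printf("raw mode on a %dx%d terminal\r\n", w, h)
	}
}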
@@ -1,111 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package term
-
-import (
-	"io"
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-// State contains the state of a terminal.
-type state struct {
-	termios unix.Termios
-}
-
-func isTerminal(fd int) bool {
-	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
-	return err == nil
-}
-
-func readPassword(fd int) ([]byte, error) {
-	// see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
-	val, err := unix.IoctlGetTermios(fd, unix.TCGETS)
-	if err != nil {
-		return nil, err
-	}
-	oldState := *val
-
-	newState := oldState
-	newState.Lflag &^= syscall.ECHO
-	newState.Lflag |= syscall.ICANON | syscall.ISIG
-	newState.Iflag |= syscall.ICRNL
-	err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState)
-	if err != nil {
-		return nil, err
-	}
-
-	defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState)
-
-	var buf [16]byte
-	var ret []byte
-	for {
-		n, err := syscall.Read(fd, buf[:])
-		if err != nil {
-			return nil, err
-		}
-		if n == 0 {
-			if len(ret) == 0 {
-				return nil, io.EOF
-			}
-			break
-		}
-		if buf[n-1] == '\n' {
-			n--
-		}
-		ret = append(ret, buf[:n]...)
-		if n < len(buf) {
-			break
-		}
-	}
-
-	return ret, nil
-}
-
-func makeRaw(fd int) (*State, error) {
-	// see http://cr.illumos.org/~webrev/andy_js/1060/
-	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
-	if err != nil {
-		return nil, err
-	}
-
-	oldState := State{state{termios: *termios}}
-
-	termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
-	termios.Oflag &^= unix.OPOST
-	termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
-	termios.Cflag &^= unix.CSIZE | unix.PARENB
-	termios.Cflag |= unix.CS8
-	termios.Cc[unix.VMIN] = 1
-	termios.Cc[unix.VTIME] = 0
-
-	if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
-		return nil, err
-	}
-
-	return &oldState, nil
-}
-
-func restore(fd int, oldState *State) error {
-	return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
-}
-
-func getState(fd int) (*State, error) {
-	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
-	if err != nil {
-		return nil, err
-	}
-
-	return &State{state{termios: *termios}}, nil
-}
-
-func getSize(fd int) (width, height int, err error) {
-	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
-	if err != nil {
-		return 0, 0, err
-	}
-	return int(ws.Col), int(ws.Row), nil
-}
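The file deleted above is the Solaris-specific backend of golang.org/x/term; in the bumped version its role is absorbed by the shared Unix files (note the solaris tags added in the hunks below). For orientation, a minimal usage sketch of the exported API that fronts these lowercase helpers, assuming only the documented golang.org/x/term surface:

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	// IsTerminal wraps the per-platform isTerminal seen in this diff.
	if !term.IsTerminal(fd) {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}
	// MakeRaw returns the previous State so the terminal can be restored.
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer term.Restore(fd, oldState)

	// In raw mode, output needs explicit carriage returns.
	if w, h, err := term.GetSize(fd); err == nil {
		fmt.Printf("terminal size: %dx%d\r\n", w, h)
	}
}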
@@ -2,7 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd zos
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package term
 
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd
 // +build darwin dragonfly freebsd netbsd openbsd
 
 package term
@@ -1,10 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package term
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-const ioctlWriteTermios = unix.TCSETS
@@ -1,7 +1,10 @@
-// Copyright 2019 The Go Authors. All rights reserved.
+// Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build aix || linux || solaris || zos
+// +build aix linux solaris zos
+
 package term
 
 import "golang.org/x/sys/unix"
@@ -1,10 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package term
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-const ioctlWriteTermios = unix.TCSETS
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9
 // +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9
 
 package term
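The modification hunks above follow the Go 1.17 build-constraint migration: each surviving file gains a //go:build line with explicit boolean operators while keeping the legacy // +build line for pre-1.17 toolchains, and gofmt keeps the pair in sync. A sketch of the convention on a hypothetical file restricted to two platforms:

// The //go:build form uses || and && operators; the legacy // +build form
// uses spaces for OR and commas for AND. Both must appear before the
// package clause, separated from it by a blank line.

//go:build linux || darwin
// +build linux darwin

package term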
@@ -145,7 +145,6 @@ func (r *Reservation) DelayFrom(now time.Time) time.Duration {
 // Cancel is shorthand for CancelAt(time.Now()).
 func (r *Reservation) Cancel() {
 	r.CancelAt(time.Now())
-	return
 }
 
 // CancelAt indicates that the reservation holder will not perform the reserved action
@@ -186,8 +185,6 @@ func (r *Reservation) CancelAt(now time.Time) {
 			r.lim.lastEvent = prevEvent
 		}
 	}
-
-	return
 }
 
 // Reserve is shorthand for ReserveN(time.Now(), 1).
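The two hunks above only drop naked return statements at the end of Cancel and CancelAt; behavior is unchanged. For context, a hedged sketch of how a caller typically uses the Reservation API being touched, assuming the documented golang.org/x/time/rate surface:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(rate.Limit(10), 1) // 10 tokens per second, burst of 1

	r := lim.Reserve()
	if !r.OK() {
		return // the limiter can never satisfy this reservation
	}
	fmt.Println("would need to wait:", r.Delay())
	// A caller that decides not to act hands its tokens back with Cancel,
	// the method whose trailing return is removed above.
	r.Cancel()
}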
@@ -367,20 +364,13 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time,
 		last = now
 	}
 
-	// Avoid making delta overflow below when last is very old.
-	maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
-	elapsed := now.Sub(last)
-	if elapsed > maxElapsed {
-		elapsed = maxElapsed
-	}
-
 	// Calculate the new number of tokens, due to time that passed.
+	elapsed := now.Sub(last)
 	delta := lim.limit.tokensFromDuration(elapsed)
 	tokens := lim.tokens + delta
 	if burst := float64(lim.burst); tokens > burst {
 		tokens = burst
 	}
-
 	return now, last, tokens
 }
 
@@ -388,15 +378,11 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time,
 // of time it takes to accumulate them at a rate of limit tokens per second.
 func (limit Limit) durationFromTokens(tokens float64) time.Duration {
 	seconds := tokens / float64(limit)
-	return time.Nanosecond * time.Duration(1e9*seconds)
+	return time.Duration(float64(time.Second) * seconds)
 }
 
 // tokensFromDuration is a unit conversion function from a time duration to the number of tokens
 // which could be accumulated during that duration at a rate of limit tokens per second.
 func (limit Limit) tokensFromDuration(d time.Duration) float64 {
-	// Split the integer and fractional parts ourself to minimize rounding errors.
-	// See golang.org/issues/34861.
-	sec := float64(d/time.Second) * float64(limit)
-	nsec := float64(d%time.Second) * float64(limit)
-	return sec + nsec/1e9
+	return d.Seconds() * float64(limit)
 }
 
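The two rate hunks above simplify the limiter's unit conversions. In durationFromTokens the change is cosmetic: time.Nanosecond is 1, so multiplying by it was a no-op, and time.Duration(float64(time.Second) * seconds) computes the same value more readably. tokensFromDuration drops the manual integer/fraction split (the golang.org/issues/34861 workaround) in favor of d.Seconds(); since delta then comes from plain float64 arithmetic, the maxElapsed overflow guard in advance appears to become unnecessary, with the existing burst clamp still bounding the result. A small check, under an assumed rate of 100 tokens per second, that the removed and retained forms agree:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond
	limit := 100.0 // assumed rate, in tokens per second

	// Removed form: integer seconds and leftover nanoseconds, scaled separately.
	split := float64(d/time.Second)*limit + float64(d%time.Second)*limit/1e9
	// Retained form: a single float64 multiply.
	simple := d.Seconds() * limit

	fmt.Println(split, simple) // both print 150
}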
@@ -7,7 +7,8 @@ github.com/Microsoft/go-winio
 github.com/Microsoft/go-winio/pkg/guid
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
-# github.com/cespare/xxhash/v2 v2.1.1
+# github.com/cespare/xxhash/v2 v2.1.2
+## explicit
 github.com/cespare/xxhash/v2
 # github.com/containerd/containerd v1.5.10
 ## explicit
@@ -15,9 +16,6 @@ github.com/containerd/containerd/errdefs
 github.com/containerd/containerd/log
 github.com/containerd/containerd/pkg/userns
 github.com/containerd/containerd/platforms
-# github.com/coreos/etcd v3.3.27+incompatible
-## explicit
-github.com/coreos/etcd/raft/raftpb
 # github.com/creack/pty v1.1.11
 ## explicit
 github.com/creack/pty
@@ -94,7 +92,7 @@ github.com/docker/go-metrics
 # github.com/docker/go-units v0.4.0
 ## explicit
 github.com/docker/go-units
-# github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6
+# github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
 ## explicit
 github.com/docker/swarmkit/api
 github.com/docker/swarmkit/api/deepcopy
@@ -113,7 +111,6 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor
 github.com/gogo/protobuf/sortkeys
 github.com/gogo/protobuf/types
 # github.com/golang/protobuf v1.5.2
-## explicit
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
@@ -137,7 +134,7 @@ github.com/gorilla/mux
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.0
 github.com/inconshreveable/mousetrap
-# github.com/klauspost/compress v1.14.3
+# github.com/klauspost/compress v1.15.0
 ## explicit
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
@@ -176,7 +173,8 @@ github.com/opencontainers/go-digest
 ## explicit
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.0.2
+# github.com/opencontainers/runc v1.1.0
+## explicit
 github.com/opencontainers/runc/libcontainer/user
 # github.com/pkg/errors v0.9.1
 ## explicit
@@ -231,11 +229,16 @@ github.com/xeipuuv/gojsonreference
 # github.com/xeipuuv/gojsonschema v1.2.0
 ## explicit
 github.com/xeipuuv/gojsonschema
-# golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
+# go.etcd.io/etcd/raft/v3 v3.5.2
+## explicit
+go.etcd.io/etcd/raft/v3/raftpb
+# golang.org/x/crypto v0.0.0-20211202192323-5770296d904e
+## explicit
 golang.org/x/crypto/ed25519
 golang.org/x/crypto/ed25519/internal/edwards25519
 golang.org/x/crypto/pbkdf2
-# golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
+# golang.org/x/net v0.0.0-20211216030914-fe4d6282115f
+## explicit
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
@@ -251,7 +254,7 @@ golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
+# golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
 ## explicit
 golang.org/x/term
 # golang.org/x/text v0.3.7
@@ -261,7 +264,8 @@ golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 golang.org/x/text/width
-# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
+# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
+## explicit
 golang.org/x/time/rate
 # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
 golang.org/x/xerrors
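The modules.txt hunks above record the same dependency bumps at the vendor-metadata level. Every stanza in vendor/modules.txt has the same shape: a "# module version" header, marker lines such as "## explicit" recording that the main module requires the dependency directly, then one line per vendored package. An illustrative stanza for a hypothetical dependency:

# github.com/example/dep v1.2.3
## explicit
github.com/example/dep
github.com/example/dep/internal/util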