mirror of https://github.com/docker/cli.git
Merge pull request #3929 from thaJeztah/update_engine
vendor: update docker/docker to tip of v23.0 branch
This commit is contained in: commit 139e924690

vendor.mod | 24 lines changed
@@ -7,10 +7,10 @@ module github.com/docker/cli
 go 1.18

 require (
-	github.com/containerd/containerd v1.6.10
+	github.com/containerd/containerd v1.6.14
 	github.com/creack/pty v1.1.11
 	github.com/docker/distribution v2.8.1+incompatible
-	github.com/docker/docker v23.0.0-beta.1+incompatible
+	github.com/docker/docker v23.0.0-beta.1.0.20221221173850-cba986b34090+incompatible
 	github.com/docker/docker-credential-helpers v0.7.0
 	github.com/docker/go-connections v0.4.0
 	github.com/docker/go-units v0.5.0
@@ -23,7 +23,7 @@ require (
 	github.com/mitchellh/mapstructure v1.3.2
 	github.com/moby/buildkit v0.10.6
 	github.com/moby/patternmatcher v0.5.0
-	github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054
+	github.com/moby/swarmkit/v2 v2.0.0-20221215132206-0da442b2780f
 	github.com/moby/sys/sequential v0.5.0
 	github.com/moby/sys/signal v0.7.0
 	github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f
@@ -37,9 +37,9 @@ require (
 	github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a
 	github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d
 	github.com/xeipuuv/gojsonschema v1.2.0
-	golang.org/x/sys v0.2.0
-	golang.org/x/term v0.1.0
-	golang.org/x/text v0.4.0
+	golang.org/x/sys v0.3.0
+	golang.org/x/term v0.3.0
+	golang.org/x/text v0.5.0
 	gopkg.in/yaml.v2 v2.4.0
 	gotest.tools/v3 v3.4.0
 )
@@ -56,21 +56,21 @@ require (
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
-	github.com/klauspost/compress v1.15.9 // indirect
+	github.com/klauspost/compress v1.15.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/moby/sys/symlink v0.2.0 // indirect
-	github.com/opencontainers/runc v1.1.2 // indirect
-	github.com/prometheus/client_golang v1.13.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/opencontainers/runc v1.1.3 // indirect
+	github.com/prometheus/client_golang v1.14.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
-	golang.org/x/crypto v0.1.0 // indirect
-	golang.org/x/net v0.1.0 // indirect
+	golang.org/x/crypto v0.2.0 // indirect
+	golang.org/x/net v0.4.0 // indirect
 	golang.org/x/time v0.1.0 // indirect
 	google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 // indirect
 	google.golang.org/grpc v1.48.0 // indirect

vendor.sum | 55 lines changed
@@ -38,7 +38,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/hcsshim v0.9.5 h1:AbV+VPfTrIVffukazHcpxmz/sRiE6YaMDzHWR9BXZHo=
+github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d h1:hi6J4K6DKrR4/ljxn6SF6nURyu785wKMuQcjt7H3VCQ=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -84,8 +84,8 @@ github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/containerd v1.6.10 h1:8aiav7I2ZyQLbTlNMcBXyAU1FtFvp6VuyuW13qSd6Hk=
-github.com/containerd/containerd v1.6.10/go.mod h1:CVqfxdJ95PDgORwA219AwwLrREZgrTFybXu2HfMKRG0=
+github.com/containerd/containerd v1.6.14 h1:W+d0AJKVG3ioTZZyQwcw1Y3vvo6ZDYzAcjDcY4tkgGI=
+github.com/containerd/containerd v1.6.14/go.mod h1:U2NnBPIhzJDm59xF7xB2MMHnKtggpZ+phKg8o2TKj2c=
 github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -101,8 +101,8 @@ github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xb
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v23.0.0-beta.1+incompatible h1:0Xv+AFPWxTbmohdLK57pYRPmefCKthtfRF/qQwXHolg=
-github.com/docker/docker v23.0.0-beta.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.0-beta.1.0.20221221173850-cba986b34090+incompatible h1:6TPj/7/lTaDyIPzW2f0Q8uwuDXmSfmUMd/BEH+SBfNs=
+github.com/docker/docker v23.0.0-beta.1.0.20221221173850-cba986b34090+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
 github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -144,7 +144,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -251,8 +251,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
-github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -282,8 +282,8 @@ github.com/moby/buildkit v0.10.6 h1:DJlEuLIgnu34HQKF4n9Eg6q2YqQVC0eOpMb4p2eRS2w=
 github.com/moby/buildkit v0.10.6/go.mod h1:tQuuyTWtOb9D+RE425cwOCUkX0/oZ+5iBZ+uWpWQ9bU=
 github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
 github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
-github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054 h1:ny1MdKQaQI/i+i7YrwO2zPpfW2ET1QBR59HqZd+ozOI=
-github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054/go.mod h1:rUZl7gR5C7156P2qEE6wnx4riFgBjqmsQaUqo/WeyBw=
+github.com/moby/swarmkit/v2 v2.0.0-20221215132206-0da442b2780f h1:BTVdvWS36nfgQBEUjNxQGmEyqWLG/B//4cMENoFhLnQ=
+github.com/moby/swarmkit/v2 v2.0.0-20221215132206-0da442b2780f/go.mod h1:khWPPVLudKat1BTpVcw7tI4hRYOECDuwIT4kyxQwgg8=
 github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
 github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
@@ -314,8 +314,8 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs=
 github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
-github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw=
-github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
@@ -333,14 +333,15 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
-github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
@@ -365,7 +366,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -395,7 +396,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a h1:tlJ7tGUHvcvL1v3yR6NcCc9nOqh2L+CG6HWrYQtwzQ0=
 github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a/go.mod h1:Y94A6rPp2OwNfP/7vmf8O2xx2IykP8pPXQ1DLouGnEw=
@@ -436,8 +437,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
+golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -503,8 +504,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -576,13 +577,13 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -591,8 +592,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2,14 +2,12 @@ package errdefs // import "github.com/docker/docker/errdefs"

 import (
 	"net/http"
-
-	"github.com/sirupsen/logrus"
 )

 // FromStatusCode creates an errdef error, based on the provided HTTP status-code
 func FromStatusCode(err error, statusCode int) error {
 	if err == nil {
-		return err
+		return nil
 	}
 	switch statusCode {
 	case http.StatusNotFound:
@@ -33,11 +31,6 @@ func FromStatusCode(err error, statusCode int) error {
 			err = System(err)
 		}
 	default:
-		logrus.WithError(err).WithFields(logrus.Fields{
-			"module":      "api",
-			"status_code": statusCode,
-		}).Debug("FIXME: Got an status-code for which error does not match any expected type!!!")
-
 		switch {
 		case statusCode >= 200 && statusCode < 400:
 			// it's a client error
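The hunk above drops the logrus dependency from errdefs and makes the nil case explicit. A minimal usage sketch of this helper (the wrapped error text is illustrative, not from the diff):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/docker/docker/errdefs"
)

func main() {
	// Wrap a raw API error with the errdefs type implied by its HTTP status.
	raw := errors.New("no such container: abc123")
	err := errdefs.FromStatusCode(raw, http.StatusNotFound)

	// Callers branch on the error class rather than the raw status code.
	if errdefs.IsNotFound(err) {
		fmt.Println("not found:", err)
	}

	// A nil input now returns an explicit nil instead of echoing err.
	fmt.Println(errdefs.FromStatusCode(nil, http.StatusInternalServerError)) // <nil>
}
```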
@@ -17,6 +17,30 @@ This package provides various compression algorithms.

 # changelog

+* Sept 26, 2022 (v1.15.11)
+
+	* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
+	* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+	* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+	* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+	* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+	* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+	* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
 * July 21, 2022 (v1.15.9)

 	* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
 	* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
 	* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643

 * July 13, 2022 (v1.15.8)

 	* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641

@@ -91,15 +115,15 @@ This package provides various compression algorithms.
 * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
 * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)

 <details>
 <summary>See Details</summary>
 Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.

 Stream decompression is now faster in asynchronous mode, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.

 While the release has been extensively tested, it is recommended to test when upgrading.
 </details>

 <details>
 <summary>See changes to v1.14.x</summary>

 * Feb 22, 2022 (v1.14.4)
 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)

@@ -125,6 +149,7 @@ While the release has been extensively tested, it is recommended to test when upgrading.
 * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
 * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
 </details>

 <details>
 <summary>See changes to v1.13.x</summary>
@@ -763,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
@@ -997,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			// copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
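The rewritten copies above rely on Go 1.17's slice-to-array-pointer conversion: `*(*[bufoff]byte)(out) = buf[0]` performs a single length check and then compiles to a fixed-size move, where each old `copy` call took a variable-length path. A standalone sketch of the idiom, with illustrative names rather than the decoder's own:

```go
package main

import "fmt"

const chunk = 4 // fixed block size, standing in for bufoff in the diff

// writeChunk stores a fixed-size block at the start of dst.
// The conversion panics if len(dst) < chunk, so callers must
// bounds-check first, just as the decoder does before its stores.
func writeChunk(dst []byte, src [chunk]byte) {
	*(*[chunk]byte)(dst) = src // one bounds check, then a fixed-size move
}

func main() {
	buf := make([]byte, 8)
	writeChunk(buf, [chunk]byte{1, 2, 3, 4})
	fmt.Println(buf) // [1 2 3 4 0 0 0 0]
}
```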
@@ -14,12 +14,14 @@ import (

 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog > 8.
+//
 //go:noescape
 func decompress4x_main_loop_amd64(ctx *decompress4xContext)

 // decompress4x_8b_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog <= 8 which decodes 4 entries
 // per loop.
+//
 //go:noescape
 func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)

@@ -145,11 +147,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {

 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_amd64(ctx *decompress1xContext)

 // decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_bmi2(ctx *decompress1xContext)

@@ -1,7 +1,6 @@
 // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.

 //go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc

 // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 TEXT ·decompress4x_main_loop_amd64(SB), $0-8
@@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			//copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= len(lit) && len(lit) <= 65536
 func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
 // emitCopy writes a copy chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= offset && offset <= 65535
 //	4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
 // src[i:i+k-j] and src[j:k] have the same contents.
 //
 // It assumes that:
+//
 //	0 <= i && i < j && j <= len(src)
 func extendMatch(src []byte, i, j int) int {
 	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
 // been written.
 //
 // It also assumes that:
+//
 //	len(dst) >= MaxEncodedLen(len(src)) &&
-// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlock(dst, src []byte) (d int) {
 	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
 	// The table element type is uint16, as s < sLimit and sLimit < len(src)
@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard license.

 Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.

+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
 ## Installation

 Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
@@ -10,7 +10,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
@@ -233,7 +232,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
 		if b.lowMem {
 			b.dst = make([]byte, b.RLESize)
 		} else {
-			b.dst = make([]byte, maxBlockSize)
+			b.dst = make([]byte, maxCompressedBlockSize)
 		}
 	}
 	b.dst = b.dst[:b.RLESize]
@@ -651,7 +650,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
 		buf.Write(in)
-		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+		os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
 	}

 	return nil
@@ -7,7 +7,6 @@ package zstd
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 )

 type byteBuffer interface {
@@ -124,7 +123,7 @@ func (r *readerWrapper) readByte() (byte, error) {
 }

 func (r *readerWrapper) skipN(n int64) error {
-	n2, err := io.CopyN(ioutil.Discard, r.r, n)
+	n2, err := io.CopyN(io.Discard, r.r, n)
 	if n2 != n {
 		err = io.ErrUnexpectedEOF
 	}
@@ -35,6 +35,7 @@ type Decoder struct {
 		br      readerWrapper
 		enabled bool
 		inFrame bool
+		dstBuf  []byte
 	}

 	frame *frameDec
@@ -187,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error {
 	}

 	// If bytes buffer and < 5MB, do sync decoding anyway.
-	if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
 		bb2 := bb
 		if debugDecoder {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
 		b := bb2.Bytes()
 		var dst []byte
-		if cap(d.current.b) > 0 {
-			dst = d.current.b
+		if cap(d.syncStream.dstBuf) > 0 {
+			dst = d.syncStream.dstBuf[:0]
 		}

-		dst, err := d.DecodeAll(b, dst[:0])
+		dst, err := d.DecodeAll(b, dst)
 		if err == nil {
 			err = io.EOF
 		}
+		// Save output buffer
+		d.syncStream.dstBuf = dst
 		d.current.b = dst
 		d.current.err = err
 		d.current.flushed = true
@@ -216,6 +219,7 @@ func (d *Decoder) Reset(r io.Reader) error {
 	d.current.err = nil
 	d.current.flushed = false
 	d.current.d = nil
+	d.syncStream.dstBuf = nil

 	// Ensure no-one else is still running...
 	d.streamWg.Wait()
@@ -312,6 +316,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 	// Grab a block decoder and frame decoder.
 	block := <-d.decoders
 	frame := block.localFrame
+	initialSize := len(dst)
 	defer func() {
 		if debugDecoder {
 			printf("re-adding decoder: %p", block)
@@ -354,7 +359,16 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			return dst, ErrWindowSizeExceeded
 		}
 		if frame.FrameContentSize != fcsUnknown {
-			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+				}
 				return dst, ErrDecoderSizeExceeded
 			}
+			if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+				}
+				return dst, ErrDecoderSizeExceeded
+			}
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +378,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			}
 		}

-	if cap(dst) == 0 {
+	if cap(dst) == 0 && !d.o.limitToCap {
 		// Allocate len(input) * 2 by default if nothing is provided
 		// and we didn't get frame content size.
 		size := len(input) * 2
@@ -382,6 +396,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		if err != nil {
 			return dst, err
 		}
+		if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+			return dst, ErrDecoderSizeExceeded
+		}
 		if len(frame.bBuf) == 0 {
 			if debugDecoder {
 				println("frame dbuf empty")
@@ -667,6 +684,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 				if debugDecoder {
 					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
 				}
+				hist.reset()
 				hist.decoders = block.async.newHist.decoders
 				hist.recentOffsets = block.async.newHist.recentOffsets
 				hist.windowSize = block.async.newHist.windowSize
@@ -698,6 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 			seqExecute <- block
 		}
 		close(seqExecute)
+		hist.reset()
 	}()

 	var wg sync.WaitGroup
@@ -721,6 +740,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 				if debugDecoder {
 					println("Async 2: new history")
 				}
+				hist.reset()
 				hist.windowSize = block.async.newHist.windowSize
 				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
 				if block.async.newHist.dict != nil {
@@ -750,7 +770,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 				if block.lowMem {
 					block.dst = make([]byte, block.RLESize)
 				} else {
-					block.dst = make([]byte, maxBlockSize)
+					block.dst = make([]byte, maxCompressedBlockSize)
 				}
 			}
 			block.dst = block.dst[:block.RLESize]
@@ -802,13 +822,14 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 		if debugDecoder {
 			println("decoder goroutines finished")
 		}
+		hist.reset()
 	}()

+	var hist history
 decodeStream:
 	for {
-		var hist history
 		var hasErr bool
-
+		hist.reset()
 		decodeBlock := func(block *blockDec) {
 			if hasErr {
 				if block != nil {
@@ -852,6 +873,10 @@ decodeStream:
 			}
 		}
 		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+			if debugDecoder {
+				println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+			}
+
 			err = ErrDecoderSizeExceeded
 		}
 		if err != nil {
@@ -920,5 +945,6 @@ decodeStream:
 		}
 	}
 	close(seqDecode)
 	wg.Wait()
+	hist.reset()
 	d.frame.history.b = frameHistCache
 }
@@ -14,20 +14,23 @@ type DOption func(*decoderOptions) error

 // options retains accumulated state of multiple options.
 type decoderOptions struct {
-	lowMem         bool
-	concurrent     int
-	maxDecodedSize uint64
-	maxWindowSize  uint64
-	dicts          []dict
-	ignoreChecksum bool
+	lowMem          bool
+	concurrent      int
+	maxDecodedSize  uint64
+	maxWindowSize   uint64
+	dicts           []dict
+	ignoreChecksum  bool
+	limitToCap      bool
+	decodeBufsBelow int
 }

 func (o *decoderOptions) setDefault() {
 	*o = decoderOptions{
 		// use less ram: true for now, but may change.
-		lowMem:        true,
-		concurrent:    runtime.GOMAXPROCS(0),
-		maxWindowSize: MaxWindowSize,
+		lowMem:          true,
+		concurrent:      runtime.GOMAXPROCS(0),
+		maxWindowSize:   MaxWindowSize,
+		decodeBufsBelow: 128 << 10,
 	}
 	if o.concurrent > 4 {
 		o.concurrent = 4
@@ -114,6 +117,29 @@ func WithDecoderMaxWindow(size uint64) DOption {
 	}
 }

+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.limitToCap = b
+		return nil
+	}
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses less allocations but will have the full decompressed object in memory.
+// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+	return func(o *decoderOptions) error {
+		o.decodeBufsBelow = size
+		return nil
+	}
+}
+
 // IgnoreChecksum allows to forcibly ignore checksum checking.
 func IgnoreChecksum(b bool) DOption {
 	return func(o *decoderOptions) error {
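A short sketch of how a caller might combine the two options added above; the compressed input is a placeholder and the buffer sizes are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Cap DecodeAll output at the spare capacity of dst, and take the
	// synchronous buffered path only for inputs under 64 KiB.
	dec, err := zstd.NewReader(nil,
		zstd.WithDecodeAllCapLimit(true),
		zstd.WithDecodeBuffersBelow(64<<10),
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	compressed := []byte{ /* a zstd frame, omitted here */ }
	dst := make([]byte, 0, 1<<20) // decoding may not grow past 1 MiB
	out, err := dec.DecodeAll(compressed, dst)
	fmt.Println(len(out), err)
}
```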
@@ -32,6 +32,7 @@ type match struct {
 	length int32
 	rep    int32
 	est    int32
+	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }

 const highScore = 25000
@@ -416,15 +416,23 @@ encodeLoop:

 			// Try to find a better match by searching for a long match at the end of the current best match
 			if s+matched < sLimit {
+				// Allow some bytes at the beginning to mismatch.
+				// Sweet spot is around 3 bytes, but depends on input.
+				// The skipped bytes are tested in Extend backwards,
+				// and still picked up as part of the match if they do.
+				const skipBeginning = 3
+
 				nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
-				cv := load3232(src, s)
+				s2 := s + skipBeginning
+				cv := load3232(src, s2)
 				candidateL := e.longTable[nextHashL]
-				coffsetL := candidateL.offset - e.cur - matched
-				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+				if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 					// Found a long match, at least 4 bytes.
-					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 					if matchedNext > matched {
 						t = coffsetL
+						s = s2
 						matched = matchedNext
 						if debugMatches {
 							println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:

 				// Check prev long...
 				if true {
-					coffsetL = candidateL.prev - e.cur - matched
-					if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+					coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+					if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 						// Found a long match, at least 4 bytes.
-						matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+						matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 						if matchedNext > matched {
 							t = coffsetL
+							s = s2
 							matched = matchedNext
 							if debugMatches {
 								println("prev long match at end-of-match")
@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 	}

 	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
-		copy(e.longTable[:], e.dictLongTable)
+		//copy(e.longTable[:], e.dictLongTable)
+		e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
 		for i := range e.longTableShardDirty {
 			e.longTableShardDirty[i] = false
 		}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 			continue
 		}

-		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
 		e.longTableShardDirty[i] = false
 	}
 }
@@ -304,7 +304,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 		minNonLiteralBlockSize = 1 + 1 + inputMargin
 	)
 	if debugEncoder {
-		if len(src) > maxBlockSize {
+		if len(src) > maxCompressedBlockSize {
 			panic("src too big")
 		}
 	}
@@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 	const shardCnt = tableShardCnt
 	const shardSize = tableShardSize
 	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-		copy(e.table[:], e.dictTable)
+		//copy(e.table[:], e.dictTable)
+		e.table = *(*[tableSize]tableEntry)(e.dictTable)
 		for i := range e.tableShardDirty {
 			e.tableShardDirty[i] = false
 		}
@@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 			continue
 		}

-		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
 		e.tableShardDirty[i] = false
 	}
 	e.allDirty = false
@@ -261,11 +261,16 @@ func (d *frameDec) reset(br byteBuffer) error {
 	}
 	d.history.windowSize = int(d.WindowSize)
 	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
-		// Alloc 2x window size if not low-mem, or very small window size.
+		// Alloc 2x window size if not low-mem, or window size below 2MB.
 		d.history.allocFrameBuffer = d.history.windowSize * 2
 	} else {
-		// Alloc with one additional block
-		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		if d.o.lowMem {
+			// Alloc with 1MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+		} else {
+			// Alloc with 2MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		}
 	}

 	if debugDecoder {
@@ -343,7 +348,7 @@ func (d *frameDec) consumeCRC() error {
 	return nil
 }

-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b

@@ -353,12 +358,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
 	d.history.decoders.maxSyncLen = 0
+	if d.o.limitToCap {
+		d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+	}
 	if d.FrameContentSize != fcsUnknown {
-		d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+			d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		}
 		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+			if debugDecoder {
+				println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+			}
 			return dst, ErrDecoderSizeExceeded
 		}
-		if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+		if debugDecoder {
+			println("maxSyncLen:", d.history.decoders.maxSyncLen)
+		}
+		if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
 			// Alloc for output
 			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
 			copy(dst2, dst)
@@ -378,7 +394,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 		if err != nil {
 			break
 		}
-		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+		if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+			println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.o.limitToCap && len(d.history.b) > cap(dst) {
+			println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
 			err = ErrDecoderSizeExceeded
 			break
 		}
@@ -21,7 +21,8 @@ type buildDtableAsmContext struct {

 // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 // Function returns non-zero exit code on error.
-// go:noescape
+//
+//go:noescape
 func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int

 // please keep in sync with _generate/gen_fse.go
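The one-character fix above is behavioral, not cosmetic: `// go:noescape` (with a space) is an ordinary comment the toolchain ignores, so the annotation silently did nothing; compiler directives must be written as `//go:` with no space, directly above the declaration they apply to. A quick illustration using `//go:noinline`, which is easier to observe than an assembly stub:

```go
package main

import "fmt"

// go:noinline   <- the space makes this a plain comment with no effect

//go:noinline
func add(a, b int) int { // this directive is honored: add is never inlined
	return a + b
}

func main() {
	fmt.Println(add(1, 2))
}
```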
@@ -1,7 +1,6 @@
 // Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.

 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm

 // func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 TEXT ·buildDtable_asm(SB), $0-24
@@ -37,26 +37,23 @@ func (h *history) reset() {
 	h.ignoreBuffer = 0
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
-	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
+	h.decoders.freeDecoders()
 	h.decoders = sequenceDecs{br: h.decoders.br}
-	if h.huffTree != nil {
-		if h.dict == nil || h.dict.litEnc != h.huffTree {
-			huffDecoderPool.Put(h.huffTree)
-		}
-	}
+	h.freeHuffDecoder()
 	h.huffTree = nil
 	h.dict = nil
 	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
 }

+func (h *history) freeHuffDecoder() {
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
+		}
+	}
+}
+
 func (h *history) setDict(dict *dict) {
 	if dict == nil {
 		return
@@ -99,6 +99,21 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
 	return nil
 }

+func (s *sequenceDecs) freeDecoders() {
+	if f := s.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.litLengths.fse = nil
+	}
+	if f := s.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.offsets.fse = nil
+	}
+	if f := s.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.matchLengths.fse = nil
+	}
+}
+
 // execute will execute the decoded sequence with the provided history.
 // The sequence must be evaluated before being sent.
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@@ -299,7 +314,10 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+			if size-startSize == 424242 {
+				panic("here")
+			}
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -411,7 +429,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {

 	// Check if space for literals
 	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}

 	// Add final literals
@@ -32,18 +32,22 @@ type decodeSyncAsmContext struct {
 // sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

 // sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

 // sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

 // sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

@@ -135,7 +139,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 		if debugDecoder {
 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 		}
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)

 	default:
 		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
@@ -143,7 +147,8 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {

 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
 	}
 	err := br.close()
 	if err != nil {
@@ -201,20 +206,24 @@ const errorNotEnoughSpace = 5
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

@@ -281,7 +290,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {

 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
@@ -308,10 +317,12 @@ type executeAsmContext struct {
 // Returns false if a match offset is too big.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool

 // Same as above, but with safe memcopies
+//
 //go:noescape
 func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool

@@ -1,7 +1,6 @@
// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.

//go:build !appengine && !noasm && gc && !noasm
// +build !appengine,!noasm,gc,!noasm

// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV

@@ -111,7 +111,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
    }
    s.seqSize += ll + ml
    if s.seqSize > maxBlockSize {
        return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
        return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
    }
    litRemain -= ll
    if litRemain < 0 {

@@ -149,7 +149,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
    }
    s.seqSize += litRemain
    if s.seqSize > maxBlockSize {
        return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
        return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
    }
    err := br.close()
    if err != nil {
@@ -140,12 +140,13 @@ func (c *counter) get() float64 {
}

func (c *counter) Write(out *dto.Metric) error {
    val := c.get()

    // Read the Exemplar first and the value second. This is to avoid a race condition
    // where users see an exemplar for a not-yet-existing observation.
    var exemplar *dto.Exemplar
    if e := c.exemplar.Load(); e != nil {
        exemplar = e.(*dto.Exemplar)
    }
    val := c.get()

    return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
}
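The move of val := c.get() in the hunk above is a small ordering fix: the exemplar is loaded before the counter value, so a concurrent writer (which updates the value first and the exemplar second) can never leave a reader holding an exemplar for an observation that is not yet reflected in the value. A minimal standalone sketch of the same pattern, with invented names, not client_golang code:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // counter is a toy stand-in: val is the primary value, note plays the
    // role of the exemplar attached to it.
    type counter struct {
        val  uint64       // accessed via sync/atomic
        note atomic.Value // written after val by the writer
    }

    func (c *counter) add(n uint64, annotation string) {
        atomic.AddUint64(&c.val, n) // 1: value first
        c.note.Store(annotation)    // 2: annotation second
    }

    // snapshot reads in the opposite order: annotation first, value second.
    // Any annotation it sees was stored before some value >= the one read,
    // so it never reports an annotation for a not-yet-visible observation.
    func (c *counter) snapshot() (uint64, string) {
        s, _ := c.note.Load().(string)
        return atomic.LoadUint64(&c.val), s
    }

    func main() {
        var c counter
        c.add(1, "trace-id-1234")
        v, note := c.snapshot()
        fmt.Println(v, note)
    }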
@@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
//
// myVec.WithLabelValues("404", "GET").Add(42)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
    c, err := v.GetMetricWithLabelValues(lvs...)
    if err != nil {

@@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter {

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
//
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
    c, err := v.GetMetricWith(labels)
    if err != nil {
@@ -21,55 +21,66 @@
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
// A Basic Example
// # A Basic Example
//
// As a starting point, a very basic usage example:
//
// package main
// package main
//
// import (
// "log"
// "net/http"
// import (
// "log"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// )
// type metrics struct {
// cpuTemp prometheus.Gauge
// hdFailures *prometheus.CounterVec
// }
//
// func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
// func NewMetrics(reg prometheus.Registerer) *metrics {
// m := &metrics{
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// }),
// hdFailures: prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// ),
// }
// reg.MustRegister(m.cpuTemp)
// reg.MustRegister(m.hdFailures)
// return m
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
// func main() {
// // Create a non-global registry.
// reg := prometheus.NewRegistry()
//
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// log.Fatal(http.ListenAndServe(":8080", nil))
// }
// // Create new metrics and register them using the custom registry.
// m := NewMetrics(reg)
// // Set values for the new created metrics.
// m.cpuTemp.Set(65.3)
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// // Expose metrics and custom registry via an HTTP server
// // using the HandleFor function. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
// It register the metrics using a custom registry and exposes them via an HTTP server
// on the /metrics endpoint.
//
// Metrics
// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
// Custom Collectors and constant Metrics
// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first

@@ -141,7 +152,7 @@
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
// Advanced Uses of the Registry
// # Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.

@@ -176,23 +187,23 @@
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
// Pushing to the Pushgateway
// # Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
// Graphite Bridge
// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
// Other Means of Exposition
// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
@@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
//
// myVec.WithLabelValues("404", "GET").Add(42)
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
    g, err := v.GetMetricWithLabelValues(lvs...)
    if err != nil {

@@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
//
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
    g, err := v.GetMetricWith(labels)
    if err != nil {
File diff suppressed because it is too large.

60 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go generated vendored Normal file
@@ -0,0 +1,60 @@
// Copyright (c) 2015 Björn Rabenstein
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
// The code in this package is copy/paste to avoid a dependency. Hence this file
// carries the copyright of the original repo.
// https://github.com/beorn7/floats
package internal

import (
    "math"
)

// minNormalFloat64 is the smallest positive normal value of type float64.
var minNormalFloat64 = math.Float64frombits(0x0010000000000000)

// AlmostEqualFloat64 returns true if a and b are equal within a relative error
// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
// details of the applied method.
func AlmostEqualFloat64(a, b, epsilon float64) bool {
    if a == b {
        return true
    }
    absA := math.Abs(a)
    absB := math.Abs(b)
    diff := math.Abs(a - b)
    if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
        return diff < epsilon*minNormalFloat64
    }
    return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
}

// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if !AlmostEqualFloat64(a[i], b[i], epsilon) {
            return false
        }
    }
    return true
}
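The helper above compares floats by relative error, except near zero, where relative error is meaningless and an absolute scale of epsilon*minNormalFloat64 is used instead. A hedged, table-test-style sketch of that contract (it would live next to the file above, inside the internal package; the cases are my own, not from the diff):

    package internal

    import "testing"

    // TestAlmostEqualFloat64 illustrates the comparison contract.
    func TestAlmostEqualFloat64(t *testing.T) {
        const eps = 1e-9
        cases := []struct {
            a, b float64
            want bool
        }{
            {1.0, 1.0, true},         // exact equality short-circuits
            {1.0, 1.0 + 1e-12, true}, // well within relative error eps
            {1.0, 1.0 + 1e-6, false}, // outside relative error eps
            {0.0, 1e-320, true},      // near zero: absolute scale eps*minNormalFloat64
        }
        for _, c := range cases {
            if got := AlmostEqualFloat64(c.a, c.b, eps); got != c.want {
                t.Errorf("AlmostEqualFloat64(%g, %g, %g) = %v, want %v", c.a, c.b, eps, got, c.want)
            }
        }
    }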
@@ -201,12 +201,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool {
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
//
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
//
// and for all (i',j',k') meeting those conditions,
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
@@ -25,7 +25,8 @@ import (
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
@@ -187,7 +187,7 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
    } else {
        // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
        b := &dto.Bucket{
            CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
            CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
            UpperBound:      proto.Float64(math.Inf(1)),
            Exemplar:        e,
        }
5 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go generated vendored
@@ -73,12 +73,11 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
    return func(r *http.Request) (*http.Response, error) {
        resp, err := next.RoundTrip(r)
        if err == nil {
            exemplarAdd(
            addWithExemplar(
                counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
                1,
                rtOpts.getExemplarFn(r.Context()),
            )
            counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
        }
        return resp, err
    }

@@ -117,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
        start := time.Now()
        resp, err := next.RoundTrip(r)
        if err == nil {
            exemplarObserve(
            observeWithExemplar(
                obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
                time.Since(start).Seconds(),
                rtOpts.getExemplarFn(r.Context()),
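The renames above are internal, but the exported round-tripper middleware they back is straightforward to use. A minimal sketch of instrumenting an outgoing http.Client with the counter variant (the metric name and URL here are illustrative, not from the diff):

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()

        // Counter of outgoing requests, partitioned by status code and method,
        // which is the label set the middleware knows how to fill.
        apiRequests := prometheus.NewCounterVec(
            prometheus.CounterOpts{
                Name: "client_api_requests_total",
                Help: "Total outgoing API requests.",
            },
            []string{"code", "method"},
        )
        reg.MustRegister(apiRequests)

        // Wrap the default transport; each successful round trip increments
        // the counter (and, with an exemplar function configured in the
        // options, attaches an exemplar via addWithExemplar).
        client := &http.Client{
            Transport: promhttp.InstrumentRoundTripperCounter(apiRequests, http.DefaultTransport),
        }

        resp, err := client.Get("https://example.com/")
        if err == nil {
            resp.Body.Close()
        }
    }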
24 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go generated vendored
@@ -28,7 +28,9 @@ import (
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"

func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
// which falls back to [prometheus.Observer.Observe] if no labels are provided.
func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
    if labels == nil {
        obs.Observe(val)
        return

@@ -36,7 +38,9 @@ func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]str
    obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
}

func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
// which falls back to [prometheus.Counter.Add] if no labels are provided.
func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
    if labels == nil {
        obs.Add(val)
        return
@@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
        d := newDelegator(w, nil)
        next.ServeHTTP(d, r)

        exemplarObserve(
        observeWithExemplar(
            obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
            time.Since(now).Seconds(),
            hOpts.getExemplarFn(r.Context()),

@@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
        now := time.Now()
        next.ServeHTTP(w, r)

        exemplarObserve(
        observeWithExemplar(
            obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
            time.Since(now).Seconds(),
            hOpts.getExemplarFn(r.Context()),

@@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
        d := newDelegator(w, nil)
        next.ServeHTTP(d, r)

        exemplarAdd(
        addWithExemplar(
            counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
            1,
            hOpts.getExemplarFn(r.Context()),

@@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,

    return func(w http.ResponseWriter, r *http.Request) {
        next.ServeHTTP(w, r)
        exemplarAdd(
        addWithExemplar(
            counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
            1,
            hOpts.getExemplarFn(r.Context()),

@@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
    return func(w http.ResponseWriter, r *http.Request) {
        now := time.Now()
        d := newDelegator(w, func(status int) {
            exemplarObserve(
            observeWithExemplar(
                obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
                time.Since(now).Seconds(),
                hOpts.getExemplarFn(r.Context()),

@@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
        d := newDelegator(w, nil)
        next.ServeHTTP(d, r)
        size := computeApproximateRequestSize(r)
        exemplarObserve(
        observeWithExemplar(
            obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
            float64(size),
            hOpts.getExemplarFn(r.Context()),

@@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
    return func(w http.ResponseWriter, r *http.Request) {
        next.ServeHTTP(w, r)
        size := computeApproximateRequestSize(r)
        exemplarObserve(
        observeWithExemplar(
            obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
            float64(size),
            hOpts.getExemplarFn(r.Context()),

@@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        d := newDelegator(w, nil)
        next.ServeHTTP(d, r)
        exemplarObserve(
        observeWithExemplar(
            obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
            float64(d.Written()),
            hOpts.getExemplarFn(r.Context()),
@@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error {
}

// Registry registers Prometheus collectors, collects their metrics, and gathers
// them into MetricFamilies for exposition. It implements both Registerer and
// Gatherer. The zero value is not usable. Create instances with NewRegistry or
// NewPedanticRegistry.
// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
// and Collector. The zero value is not usable. Create instances with
// NewRegistry or NewPedanticRegistry.
//
// Registry implements Collector to allow it to be used for creating groups of
// metrics. See the Grouping example for how this can be done.
type Registry struct {
    mtx            sync.RWMutex
    collectorsByID map[uint64]Collector // ID is a hash of the descIDs.

@@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
    return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}

// Describe implements Collector.
func (r *Registry) Describe(ch chan<- *Desc) {
    r.mtx.RLock()
    defer r.mtx.RUnlock()

    // Only report the checked Collectors; unchecked collectors don't report any
    // Desc.
    for _, c := range r.collectorsByID {
        c.Describe(ch)
    }
}

// Collect implements Collector.
func (r *Registry) Collect(ch chan<- Metric) {
    r.mtx.RLock()
    defer r.mtx.RUnlock()

    for _, c := range r.collectorsByID {
        c.Collect(ch)
    }
    for _, c := range r.uncheckedCollectors {
        c.Collect(ch)
    }
}

// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
// Prometheus text format, and writes it to a temporary file. Upon success, the
// temporary file is renamed to the provided filename.
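The new Describe/Collect methods are what let one Registry be registered into another, which is the "Grouping" idea the updated doc comment points at. A hedged sketch of that usage (metric names are illustrative):

    package main

    import (
        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // A sub-registry holding one group of metrics.
        group := prometheus.NewRegistry()
        group.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "group_build_info",
            Help: "Example metric owned by the group.",
        }))

        // Because *Registry now implements Collector, the whole group can be
        // registered with a parent registry in a single call.
        parent := prometheus.NewRegistry()
        parent.MustRegister(group)

        // Gathering from the parent now includes the group's metrics.
        if _, err := parent.Gather(); err != nil {
            panic(err)
        }
    }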
@@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
//
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
    s, err := v.GetMetricWithLabelValues(lvs...)
    if err != nil {

@@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
//
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *SummaryVec) With(labels Labels) Observer {
    s, err := v.GetMetricWith(labels)
    if err != nil {

@@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error {
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
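To make the quantiles-map convention concrete, here is a hedged sketch of building a constant summary from already-aggregated data; the Desc, count, and sum are invented for illustration, while the quantile map mirrors the doc comment above:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        desc := prometheus.NewDesc(
            "http_request_duration_seconds",
            "Latency summary scraped from an external system.",
            nil, nil,
        )

        // Median of 0.23s and 99th percentile of 0.56s, as in the doc comment.
        s, err := prometheus.NewConstSummary(
            desc,
            4711,   // count
            403.34, // sum
            map[float64]float64{0.5: 0.23, 0.99: 0.56},
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(s.Desc())
    }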
@@ -25,11 +25,12 @@ type Timer struct {
// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the
// following way:
// func TimeMe() {
//     timer := NewTimer(myHistogram)
//     defer timer.ObserveDuration()
//     // Do actual work.
// }
//
// func TimeMe() {
//     timer := NewTimer(myHistogram)
//     defer timer.ObserveDuration()
//     // Do actual work.
// }
func NewTimer(o Observer) *Timer {
    return &Timer{
        begin: time.Now(),
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: metrics.proto
// source: io/prometheus/client/metrics.proto

package io_prometheus_client

@@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MetricType int32

const (
    MetricType_COUNTER MetricType = 0
    MetricType_GAUGE   MetricType = 1
    MetricType_SUMMARY MetricType = 2
    MetricType_UNTYPED MetricType = 3
    // COUNTER must use the Metric field "counter".
    MetricType_COUNTER MetricType = 0
    // GAUGE must use the Metric field "gauge".
    MetricType_GAUGE MetricType = 1
    // SUMMARY must use the Metric field "summary".
    MetricType_SUMMARY MetricType = 2
    // UNTYPED must use the Metric field "untyped".
    MetricType_UNTYPED MetricType = 3
    // HISTOGRAM must use the Metric field "histogram".
    MetricType_HISTOGRAM MetricType = 4
    // GAUGE_HISTOGRAM must use the Metric field "histogram".
    MetricType_GAUGE_HISTOGRAM MetricType = 5
)

var MetricType_name = map[int32]string{
@@ -37,14 +44,16 @@ var MetricType_name = map[int32]string{
    2: "SUMMARY",
    3: "UNTYPED",
    4: "HISTOGRAM",
    5: "GAUGE_HISTOGRAM",
}

var MetricType_value = map[string]int32{
    "COUNTER":   0,
    "GAUGE":     1,
    "SUMMARY":   2,
    "UNTYPED":   3,
    "HISTOGRAM": 4,
    "COUNTER":         0,
    "GAUGE":           1,
    "SUMMARY":         2,
    "UNTYPED":         3,
    "HISTOGRAM":       4,
    "GAUGE_HISTOGRAM": 5,
}

func (x MetricType) Enum() *MetricType {
@@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
}

func (MetricType) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{0}
    return fileDescriptor_d1e5ddb18987a258, []int{0}
}

type LabelPair struct {

@@ -82,7 +91,7 @@ func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{0}
    return fileDescriptor_d1e5ddb18987a258, []int{0}
}

func (m *LabelPair) XXX_Unmarshal(b []byte) error {

@@ -128,7 +137,7 @@ func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{1}
    return fileDescriptor_d1e5ddb18987a258, []int{1}
}

func (m *Gauge) XXX_Unmarshal(b []byte) error {

@@ -168,7 +177,7 @@ func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
func (*Counter) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{2}
    return fileDescriptor_d1e5ddb18987a258, []int{2}
}

func (m *Counter) XXX_Unmarshal(b []byte) error {

@@ -215,7 +224,7 @@ func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
func (*Quantile) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{3}
    return fileDescriptor_d1e5ddb18987a258, []int{3}
}

func (m *Quantile) XXX_Unmarshal(b []byte) error {

@@ -263,7 +272,7 @@ func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{4}
    return fileDescriptor_d1e5ddb18987a258, []int{4}
}

func (m *Summary) XXX_Unmarshal(b []byte) error {

@@ -316,7 +325,7 @@ func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
func (*Untyped) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{5}
    return fileDescriptor_d1e5ddb18987a258, []int{5}
}

func (m *Untyped) XXX_Unmarshal(b []byte) error {
@@ -345,9 +354,34 @@ func (m *Untyped) GetValue() float64 {
}

type Histogram struct {
    SampleCount *uint64   `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
    SampleSum   *float64  `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
    Bucket      []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
    SampleCount      *uint64  `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
    SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
    SampleSum        *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
    // Buckets for the conventional histogram.
    Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
    // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
    // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
    // then each power of two is divided into 2^n logarithmic buckets.
    // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
    // In the future, more bucket schemas may be added using numbers < -4 or > 8.
    Schema         *int32   `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
    ZeroThreshold  *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
    ZeroCount      *uint64  `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
    ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
    // Negative buckets for the native histogram.
    NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
    // Use either "negative_delta" or "negative_count", the former for
    // regular histograms with integer counts, the latter for float
    // histograms.
    NegativeDelta []int64   `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
    NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
    // Positive buckets for the native histogram.
    PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
    // Use either "positive_delta" or "positive_count", the former for
    // regular histograms with integer counts, the latter for float
    // histograms.
    PositiveDelta        []int64   `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
    PositiveCount        []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
    XXX_NoUnkeyedLiteral struct{}  `json:"-"`
    XXX_unrecognized     []byte    `json:"-"`
    XXX_sizecache        int32     `json:"-"`
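The schema comment above encodes a formula: with schema n, each power of two is split into 2^n logarithmic buckets, so bucket boundaries are 2^(i * 2^-n) and each boundary is the previous one times 2^(2^-n). A small standalone sketch of computing boundaries (my own illustration, not part of the generated code):

    package main

    import (
        "fmt"
        "math"
    )

    // boundary returns the upper bound of bucket index i for native-histogram
    // schema n: 2^(i * 2^-n). Index 0 (the value 1) is a boundary for every
    // schema, as the comment above requires.
    func boundary(i, n int) float64 {
        return math.Exp2(float64(i) * math.Exp2(-float64(n)))
    }

    func main() {
        // schema 0: plain powers of two.
        fmt.Println(boundary(1, 0), boundary(2, 0)) // 2 4
        // schema 2: each power of two split into 2^2 = 4 buckets, so the
        // growth factor per bucket is 2^(1/4), roughly 1.1892.
        fmt.Println(boundary(1, 2), boundary(4, 2)) // ~1.1892 2
    }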
@@ -357,7 +391,7 @@ func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{6}
    return fileDescriptor_d1e5ddb18987a258, []int{6}
}

func (m *Histogram) XXX_Unmarshal(b []byte) error {

@@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 {
    return 0
}

func (m *Histogram) GetSampleCountFloat() float64 {
    if m != nil && m.SampleCountFloat != nil {
        return *m.SampleCountFloat
    }
    return 0
}

func (m *Histogram) GetSampleSum() float64 {
    if m != nil && m.SampleSum != nil {
        return *m.SampleSum

@@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket {
    return nil
}

func (m *Histogram) GetSchema() int32 {
    if m != nil && m.Schema != nil {
        return *m.Schema
    }
    return 0
}

func (m *Histogram) GetZeroThreshold() float64 {
    if m != nil && m.ZeroThreshold != nil {
        return *m.ZeroThreshold
    }
    return 0
}

func (m *Histogram) GetZeroCount() uint64 {
    if m != nil && m.ZeroCount != nil {
        return *m.ZeroCount
    }
    return 0
}

func (m *Histogram) GetZeroCountFloat() float64 {
    if m != nil && m.ZeroCountFloat != nil {
        return *m.ZeroCountFloat
    }
    return 0
}

func (m *Histogram) GetNegativeSpan() []*BucketSpan {
    if m != nil {
        return m.NegativeSpan
    }
    return nil
}

func (m *Histogram) GetNegativeDelta() []int64 {
    if m != nil {
        return m.NegativeDelta
    }
    return nil
}

func (m *Histogram) GetNegativeCount() []float64 {
    if m != nil {
        return m.NegativeCount
    }
    return nil
}

func (m *Histogram) GetPositiveSpan() []*BucketSpan {
    if m != nil {
        return m.PositiveSpan
    }
    return nil
}

func (m *Histogram) GetPositiveDelta() []int64 {
    if m != nil {
        return m.PositiveDelta
    }
    return nil
}

func (m *Histogram) GetPositiveCount() []float64 {
    if m != nil {
        return m.PositiveCount
    }
    return nil
}

// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
    CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
    CumulativeCountFloat *float64  `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
    UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
    Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
    XXX_NoUnkeyedLiteral struct{}  `json:"-"`

@@ -412,7 +526,7 @@ func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{7}
    return fileDescriptor_d1e5ddb18987a258, []int{7}
}

func (m *Bucket) XXX_Unmarshal(b []byte) error {

@@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 {
    return 0
}

func (m *Bucket) GetCumulativeCountFloat() float64 {
    if m != nil && m.CumulativeCountFloat != nil {
        return *m.CumulativeCountFloat
    }
    return 0
}

func (m *Bucket) GetUpperBound() float64 {
    if m != nil && m.UpperBound != nil {
        return *m.UpperBound

@@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar {
    return nil
}

// A BucketSpan defines a number of consecutive buckets in a native
// histogram with their offset. Logically, it would be more
// straightforward to include the bucket counts in the Span. However,
// the protobuf representation is more compact in the way the data is
// structured here (with all the buckets in a single array separate
// from the Spans).
type BucketSpan struct {
    Offset               *int32   `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
    Length               *uint32  `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *BucketSpan) Reset() { *m = BucketSpan{} }
func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
func (*BucketSpan) ProtoMessage() {}
func (*BucketSpan) Descriptor() ([]byte, []int) {
    return fileDescriptor_d1e5ddb18987a258, []int{8}
}

func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
}
func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
}
func (m *BucketSpan) XXX_Merge(src proto.Message) {
    xxx_messageInfo_BucketSpan.Merge(m, src)
}
func (m *BucketSpan) XXX_Size() int {
    return xxx_messageInfo_BucketSpan.Size(m)
}
func (m *BucketSpan) XXX_DiscardUnknown() {
    xxx_messageInfo_BucketSpan.DiscardUnknown(m)
}

var xxx_messageInfo_BucketSpan proto.InternalMessageInfo

func (m *BucketSpan) GetOffset() int32 {
    if m != nil && m.Offset != nil {
        return *m.Offset
    }
    return 0
}

func (m *BucketSpan) GetLength() uint32 {
    if m != nil && m.Length != nil {
        return *m.Length
    }
    return 0
}
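To illustrate how the BucketSpan fields and the delta-encoded counts above are meant to be read together: spans give (offset, length) runs of occupied bucket indexes, and each integer count is a delta against the previous bucket's count. A hedged decoding sketch, with types simplified (the exact offset convention here is my reading of the comments, not taken from the diff):

    package main

    import "fmt"

    type span struct {
        offset int32  // gap to the previous span's end (starting point for the first span)
        length uint32 // number of consecutive buckets in this span
    }

    // expand turns spans plus delta-encoded counts into absolute
    // (bucket index -> count) pairs, the way NegativeSpan/NegativeDelta
    // (or the positive pair) in the Histogram message fit together.
    func expand(spans []span, deltas []int64) map[int32]int64 {
        counts := make(map[int32]int64)
        var idx int32
        var count int64
        i := 0
        for _, s := range spans {
            idx += s.offset
            for j := uint32(0); j < s.length; j++ {
                count += deltas[i] // each delta is relative to the previous count
                counts[idx] = count
                i++
                idx++
            }
        }
        return counts
    }

    func main() {
        // Two buckets at indexes 2 and 3, then a gap of one, then two more at 5 and 6.
        spans := []span{{offset: 2, length: 2}, {offset: 1, length: 2}}
        deltas := []int64{3, 1, -2, 0} // absolute counts: 3, 4, 2, 2
        fmt.Println(expand(spans, deltas))
    }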
type Exemplar struct {
    Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
    Value *float64     `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`

@@ -467,7 +641,7 @@ func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{8}
    return fileDescriptor_d1e5ddb18987a258, []int{9}
}

func (m *Exemplar) XXX_Unmarshal(b []byte) error {

@@ -526,7 +700,7 @@ func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{9}
    return fileDescriptor_d1e5ddb18987a258, []int{10}
}

func (m *Metric) XXX_Unmarshal(b []byte) error {

@@ -610,7 +784,7 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
func (*MetricFamily) Descriptor() ([]byte, []int) {
    return fileDescriptor_6039342a2ba47b72, []int{10}
    return fileDescriptor_d1e5ddb18987a258, []int{11}
}

func (m *MetricFamily) XXX_Unmarshal(b []byte) error {

@@ -669,55 +843,72 @@ func init() {
    proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
    proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
    proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
    proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
    proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
    proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
    proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
}

func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }

var fileDescriptor_6039342a2ba47b72 = []byte{
    // 665 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
    0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
    0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
    0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
    0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
    0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
    0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
    0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
    0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
    0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
    0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
    0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
    0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
    0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
    0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
    0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
    0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
    0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
    0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
    0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
    0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
    0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
    0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
    0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
    0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
    0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
    0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
    0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
    0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
    0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
    0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
    0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
    0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
    0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
    0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
    0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
    0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
    0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
    0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
    0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
    0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
    0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,

func init() {
    proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
}

var fileDescriptor_d1e5ddb18987a258 = []byte{
    // 896 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
    0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
    0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
    0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
    0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
    0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
    0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
    0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
    0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
    0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
    0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
    0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
    0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
    0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
    0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
    0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
    0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
    0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
    0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
    0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
    0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
    0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
    0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
    0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
    0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
    0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
    0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
    0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
    0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
    0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
    0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
    0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
    0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
    0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
    0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
    0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
    0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
    0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
    0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
    0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
    0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
    0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
    0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
    0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
    0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
    0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
    0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
    0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
    0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
    0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
    0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
    0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
    0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
    0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
    0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
    0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
}
@@ -27,7 +27,14 @@ func buildCommonHeaderMaps() {
        "accept-language",
        "accept-ranges",
        "age",
        "access-control-allow-credentials",
        "access-control-allow-headers",
        "access-control-allow-methods",
        "access-control-allow-origin",
        "access-control-expose-headers",
        "access-control-max-age",
        "access-control-request-headers",
        "access-control-request-method",
        "allow",
        "authorization",
        "cache-control",

@@ -53,6 +60,7 @@ func buildCommonHeaderMaps() {
        "link",
        "location",
        "max-forwards",
        "origin",
        "proxy-authenticate",
        "proxy-authorization",
        "range",

@@ -68,6 +76,8 @@ func buildCommonHeaderMaps() {
        "vary",
        "via",
        "www-authenticate",
        "x-forwarded-for",
        "x-forwarded-proto",
    }
    commonLowerHeader = make(map[string]string, len(common))
    commonCanonHeader = make(map[string]string, len(common))

@@ -85,3 +95,11 @@ func lowerHeader(v string) (lower string, ascii bool) {
    }
    return asciiToLower(v)
}

func canonicalHeader(v string) string {
    buildCommonHeaderMapsOnce()
    if s, ok := commonCanonHeader[v]; ok {
        return s
    }
    return http.CanonicalHeaderKey(v)
}
@@ -116,6 +116,11 @@ func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
    e.dynTab.setMaxSize(v)
}

// MaxDynamicTableSize returns the current dynamic header table size.
func (e *Encoder) MaxDynamicTableSize() (v uint32) {
    return e.dynTab.maxSize
}

// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same size of the default dynamic header table
@@ -0,0 +1,188 @@
// go generate gen.go
// Code generated by the command above; DO NOT EDIT.

package hpack

var staticTable = &headerFieldTable{
    evictCount: 0,
    byName: map[string]uint64{
        ":authority":                  1,
        ":method":                     3,
        ":path":                       5,
        ":scheme":                     7,
        ":status":                     14,
        "accept-charset":              15,
        "accept-encoding":             16,
        "accept-language":             17,
        "accept-ranges":               18,
        "accept":                      19,
        "access-control-allow-origin": 20,
        "age":                         21,
        "allow":                       22,
        "authorization":               23,
        "cache-control":               24,
        "content-disposition":         25,
        "content-encoding":            26,
        "content-language":            27,
        "content-length":              28,
        "content-location":            29,
        "content-range":               30,
        "content-type":                31,
        "cookie":                      32,
        "date":                        33,
        "etag":                        34,
        "expect":                      35,
        "expires":                     36,
        "from":                        37,
        "host":                        38,
        "if-match":                    39,
        "if-modified-since":           40,
        "if-none-match":               41,
        "if-range":                    42,
        "if-unmodified-since":         43,
        "last-modified":               44,
        "link":                        45,
        "location":                    46,
        "max-forwards":                47,
        "proxy-authenticate":          48,
        "proxy-authorization":         49,
        "range":                       50,
        "referer":                     51,
        "refresh":                     52,
        "retry-after":                 53,
        "server":                      54,
        "set-cookie":                  55,
        "strict-transport-security":   56,
        "transfer-encoding":           57,
        "user-agent":                  58,
        "vary":                        59,
        "via":                         60,
        "www-authenticate":            61,
    },
    byNameValue: map[pairNameValue]uint64{
        {name: ":authority", value: ""}:                   1,
        {name: ":method", value: "GET"}:                   2,
        {name: ":method", value: "POST"}:                  3,
        {name: ":path", value: "/"}:                       4,
        {name: ":path", value: "/index.html"}:             5,
        {name: ":scheme", value: "http"}:                  6,
        {name: ":scheme", value: "https"}:                 7,
        {name: ":status", value: "200"}:                   8,
        {name: ":status", value: "204"}:                   9,
        {name: ":status", value: "206"}:                   10,
        {name: ":status", value: "304"}:                   11,
        {name: ":status", value: "400"}:                   12,
        {name: ":status", value: "404"}:                   13,
        {name: ":status", value: "500"}:                   14,
        {name: "accept-charset", value: ""}:               15,
        {name: "accept-encoding", value: "gzip, deflate"}: 16,
        {name: "accept-language", value: ""}:              17,
        {name: "accept-ranges", value: ""}:                18,
        {name: "accept", value: ""}:                       19,
        {name: "access-control-allow-origin", value: ""}:  20,
        {name: "age", value: ""}:                          21,
        {name: "allow", value: ""}:                        22,
        {name: "authorization", value: ""}:                23,
        {name: "cache-control", value: ""}:                24,
        {name: "content-disposition", value: ""}:          25,
        {name: "content-encoding", value: ""}:             26,
        {name: "content-language", value: ""}:             27,
        {name: "content-length", value: ""}:               28,
        {name: "content-location", value: ""}:             29,
        {name: "content-range", value: ""}:                30,
        {name: "content-type", value: ""}:                 31,
        {name: "cookie", value: ""}:                       32,
        {name: "date", value: ""}:                         33,
        {name: "etag", value: ""}:                         34,
        {name: "expect", value: ""}:                       35,
        {name: "expires", value: ""}:                      36,
        {name: "from", value: ""}:                         37,
        {name: "host", value: ""}:                         38,
        {name: "if-match", value: ""}:                     39,
        {name: "if-modified-since", value: ""}:            40,
        {name: "if-none-match", value: ""}:                41,
        {name: "if-range", value: ""}:                     42,
        {name: "if-unmodified-since", value: ""}:          43,
        {name: "last-modified", value: ""}:                44,
        {name: "link", value: ""}:                         45,
        {name: "location", value: ""}:                     46,
        {name: "max-forwards", value: ""}:                 47,
        {name: "proxy-authenticate", value: ""}:           48,
        {name: "proxy-authorization", value: ""}:          49,
        {name: "range", value: ""}:                        50,
        {name: "referer", value: ""}:                      51,
        {name: "refresh", value: ""}:                      52,
        {name: "retry-after", value: ""}:                  53,
        {name: "server", value: ""}:                       54,
        {name: "set-cookie", value: ""}:                   55,
        {name: "strict-transport-security", value: ""}:    56,
        {name: "transfer-encoding", value: ""}:            57,
        {name: "user-agent", value: ""}:                   58,
        {name: "vary", value: ""}:                         59,
        {name: "via", value: ""}:                          60,
        {name: "www-authenticate", value: ""}:             61,
    },
    ents: []HeaderField{
        {Name: ":authority", Value: "", Sensitive: false},
        {Name: ":method", Value: "GET", Sensitive: false},
        {Name: ":method", Value: "POST", Sensitive: false},
        {Name: ":path", Value: "/", Sensitive: false},
        {Name: ":path", Value: "/index.html", Sensitive: false},
        {Name: ":scheme", Value: "http", Sensitive: false},
        {Name: ":scheme", Value: "https", Sensitive: false},
        {Name: ":status", Value: "200", Sensitive: false},
        {Name: ":status", Value: "204", Sensitive: false},
        {Name: ":status", Value: "206", Sensitive: false},
        {Name: ":status", Value: "304", Sensitive: false},
        {Name: ":status", Value: "400", Sensitive: false},
        {Name: ":status", Value: "404", Sensitive: false},
        {Name: ":status", Value: "500", Sensitive: false},
        {Name: "accept-charset", Value: "", Sensitive: false},
        {Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false},
        {Name: "accept-language", Value: "", Sensitive: false},
        {Name: "accept-ranges", Value: "", Sensitive: false},
        {Name: "accept", Value: "", Sensitive: false},
        {Name: "access-control-allow-origin", Value: "", Sensitive: false},
        {Name: "age", Value: "", Sensitive: false},
        {Name: "allow", Value: "", Sensitive: false},
        {Name: "authorization", Value: "", Sensitive: false},
        {Name: "cache-control", Value: "", Sensitive: false},
        {Name: "content-disposition", Value: "", Sensitive: false},
        {Name: "content-encoding", Value: "", Sensitive: false},
        {Name: "content-language", Value: "", Sensitive: false},
        {Name: "content-length", Value: "", Sensitive: false},
        {Name: "content-location", Value: "", Sensitive: false},
        {Name: "content-range", Value: "", Sensitive: false},
        {Name: "content-type", Value: "", Sensitive: false},
        {Name: "cookie", Value: "", Sensitive: false},
        {Name: "date", Value: "", Sensitive: false},
        {Name: "etag", Value: "", Sensitive: false},
        {Name: "expect", Value: "", Sensitive: false},
        {Name: "expires", Value: "", Sensitive: false},
        {Name: "from", Value: "", Sensitive: false},
        {Name: "host", Value: "", Sensitive: false},
        {Name: "if-match", Value: "", Sensitive: false},
        {Name: "if-modified-since", Value: "", Sensitive: false},
        {Name: "if-none-match", Value: "", Sensitive: false},
        {Name: "if-range", Value: "", Sensitive: false},
        {Name: "if-unmodified-since", Value: "", Sensitive: false},
|
||||
{Name: "last-modified", Value: "", Sensitive: false},
|
||||
{Name: "link", Value: "", Sensitive: false},
|
||||
{Name: "location", Value: "", Sensitive: false},
|
||||
{Name: "max-forwards", Value: "", Sensitive: false},
|
||||
{Name: "proxy-authenticate", Value: "", Sensitive: false},
|
||||
{Name: "proxy-authorization", Value: "", Sensitive: false},
|
||||
{Name: "range", Value: "", Sensitive: false},
|
||||
{Name: "referer", Value: "", Sensitive: false},
|
||||
{Name: "refresh", Value: "", Sensitive: false},
|
||||
{Name: "retry-after", Value: "", Sensitive: false},
|
||||
{Name: "server", Value: "", Sensitive: false},
|
||||
{Name: "set-cookie", Value: "", Sensitive: false},
|
||||
{Name: "strict-transport-security", Value: "", Sensitive: false},
|
||||
{Name: "transfer-encoding", Value: "", Sensitive: false},
|
||||
{Name: "user-agent", Value: "", Sensitive: false},
|
||||
{Name: "vary", Value: "", Sensitive: false},
|
||||
{Name: "via", Value: "", Sensitive: false},
|
||||
{Name: "www-authenticate", Value: "", Sensitive: false},
|
||||
},
|
||||
}
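
Side note (not part of this change): the generated table above is the HPACK static table from RFC 7541 Appendix A. A header field that fully matches an entry encodes as a single indexed byte, which is easy to see with the vendored hpack package; the snippet below is an illustrative sketch, not code from this diff.

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/net/http2/hpack"
    )

    func main() {
        var buf bytes.Buffer
        enc := hpack.NewEncoder(&buf)
        // ":method: GET" is static-table entry 2, so it encodes as the
        // single indexed-field byte 0x82 (0x80 | 2).
        enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
        fmt.Printf("%x\n", buf.Bytes()) // prints: 82
    }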

vendor/golang.org/x/net/http2/hpack/tables.go
@@ -96,8 +96,7 @@ func (t *headerFieldTable) evictOldest(n int) {
 // meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
 // table, the return value i actually refers to the entry t.ents[t.len()-i].
 //
-// All tables are assumed to be a dynamic tables except for the global
-// staticTable pointer.
+// All tables are assumed to be a dynamic tables except for the global staticTable.
 //
 // See Section 2.3.3.
 func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
@@ -125,81 +124,6 @@ func (t *headerFieldTable) idToIndex(id uint64) uint64 {
     return k + 1
 }
 
-// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = newStaticTable()
-var staticTableEntries = [...]HeaderField{
-    {Name: ":authority"},
-    {Name: ":method", Value: "GET"},
-    {Name: ":method", Value: "POST"},
-    {Name: ":path", Value: "/"},
-    {Name: ":path", Value: "/index.html"},
-    {Name: ":scheme", Value: "http"},
-    {Name: ":scheme", Value: "https"},
-    {Name: ":status", Value: "200"},
-    {Name: ":status", Value: "204"},
-    {Name: ":status", Value: "206"},
-    {Name: ":status", Value: "304"},
-    {Name: ":status", Value: "400"},
-    {Name: ":status", Value: "404"},
-    {Name: ":status", Value: "500"},
-    {Name: "accept-charset"},
-    {Name: "accept-encoding", Value: "gzip, deflate"},
-    {Name: "accept-language"},
-    {Name: "accept-ranges"},
-    {Name: "accept"},
-    {Name: "access-control-allow-origin"},
-    {Name: "age"},
-    {Name: "allow"},
-    {Name: "authorization"},
-    {Name: "cache-control"},
-    {Name: "content-disposition"},
-    {Name: "content-encoding"},
-    {Name: "content-language"},
-    {Name: "content-length"},
-    {Name: "content-location"},
-    {Name: "content-range"},
-    {Name: "content-type"},
-    {Name: "cookie"},
-    {Name: "date"},
-    {Name: "etag"},
-    {Name: "expect"},
-    {Name: "expires"},
-    {Name: "from"},
-    {Name: "host"},
-    {Name: "if-match"},
-    {Name: "if-modified-since"},
-    {Name: "if-none-match"},
-    {Name: "if-range"},
-    {Name: "if-unmodified-since"},
-    {Name: "last-modified"},
-    {Name: "link"},
-    {Name: "location"},
-    {Name: "max-forwards"},
-    {Name: "proxy-authenticate"},
-    {Name: "proxy-authorization"},
-    {Name: "range"},
-    {Name: "referer"},
-    {Name: "refresh"},
-    {Name: "retry-after"},
-    {Name: "server"},
-    {Name: "set-cookie"},
-    {Name: "strict-transport-security"},
-    {Name: "transfer-encoding"},
-    {Name: "user-agent"},
-    {Name: "vary"},
-    {Name: "via"},
-    {Name: "www-authenticate"},
-}
-
-func newStaticTable() *headerFieldTable {
-    t := &headerFieldTable{}
-    t.init()
-    for _, e := range staticTableEntries[:] {
-        t.addEntry(e)
-    }
-    return t
-}
-
 var huffmanCodes = [256]uint32{
     0x1ff8,
     0x7fffd8,

vendor/golang.org/x/net/http2/server.go
@@ -98,6 +98,19 @@ type Server struct {
     // the HTTP/2 spec's recommendations.
     MaxConcurrentStreams uint32
 
+    // MaxDecoderHeaderTableSize optionally specifies the http2
+    // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+    // informs the remote endpoint of the maximum size of the header compression
+    // table used to decode header blocks, in octets. If zero, the default value
+    // of 4096 is used.
+    MaxDecoderHeaderTableSize uint32
+
+    // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+    // header compression table used for encoding request headers. Received
+    // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+    // the default value of 4096 is used.
+    MaxEncoderHeaderTableSize uint32
+
     // MaxReadFrameSize optionally specifies the largest frame
     // this server is willing to read. A valid value is between
     // 16k and 16M, inclusive. If zero or otherwise invalid, a
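
For reference, a minimal sketch (not from this diff) of wiring the two new Server knobs into a standard library server via http2.ConfigureServer. The sizes and certificate paths are illustrative assumptions; leaving a field zero keeps the 4096-byte default.

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("hello"))
        })
        srv := &http.Server{Addr: ":8443", Handler: h}
        http2.ConfigureServer(srv, &http2.Server{
            MaxDecoderHeaderTableSize: 16 << 10, // advertise a 16 KiB decode table
            MaxEncoderHeaderTableSize: 8 << 10,  // cap our encoder's table at 8 KiB
        })
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // hypothetical cert paths
    }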
@@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 {
     return defaultMaxStreams
 }
 
+func (s *Server) maxDecoderHeaderTableSize() uint32 {
+    if v := s.MaxDecoderHeaderTableSize; v > 0 {
+        return v
+    }
+    return initialHeaderTableSize
+}
+
+func (s *Server) maxEncoderHeaderTableSize() uint32 {
+    if v := s.MaxEncoderHeaderTableSize; v > 0 {
+        return v
+    }
+    return initialHeaderTableSize
+}
+
 // maxQueuedControlFrames is the maximum number of control frames like
 // SETTINGS, PING and RST_STREAM that will be queued for writing before
 // the connection is closed to prevent memory exhaustion attacks.
@@ -394,7 +421,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
         advMaxStreams:               s.maxConcurrentStreams(),
         initialStreamSendWindowSize: initialWindowSize,
         maxFrameSize:                initialMaxFrameSize,
-        headerTableSize:             initialHeaderTableSize,
         serveG:                      newGoroutineLock(),
         pushEnabled:                 true,
         sawClientPreface:            opts.SawClientPreface,
@@ -424,12 +450,13 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
     sc.flow.add(initialWindowSize)
     sc.inflow.add(initialWindowSize)
     sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+    sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
 
     fr := NewFramer(sc.bw, c)
     if s.CountError != nil {
         fr.countError = s.CountError
     }
-    fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+    fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
     fr.MaxHeaderListSize = sc.maxHeaderListSize()
     fr.SetMaxReadFrameSize(s.maxReadFrameSize())
     sc.framer = fr
@@ -559,9 +586,9 @@ type serverConn struct {
     streams                     map[uint32]*stream
     initialStreamSendWindowSize int32
     maxFrameSize                int32
-    headerTableSize             uint32
     peerMaxHeaderListSize       uint32            // zero means unknown (default)
     canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
+    canonHeaderKeysSize         int               // canonHeader keys size in bytes
     writingFrame                bool              // started writing a frame (on serve goroutine or separate)
     writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
     needsFrameFlush             bool              // last frame write wasn't a flush
@@ -622,7 +649,9 @@ type stream struct {
     resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
     gotTrailerHeader bool        // HEADER frame for trailers was seen
     wroteHeaders     bool        // whether we wrote headers (not status 100)
+    readDeadline     *time.Timer // nil if unused
     writeDeadline    *time.Timer // nil if unused
+    closeErr         error       // set before cw is closed
 
     trailer    http.Header // accumulated trailers
     reqTrailer http.Header // handler's Request.Trailer
@@ -738,6 +767,13 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
     }
 }
 
+// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
+// of the entries in the canonHeader cache.
+// This should be larger than the size of unique, uncommon header keys likely to
+// be sent by the peer, while not so high as to permit unreasonable memory usage
+// if the peer sends an unbounded number of unique header keys.
+const maxCachedCanonicalHeadersKeysSize = 2048
+
 func (sc *serverConn) canonicalHeader(v string) string {
     sc.serveG.check()
     buildCommonHeaderMapsOnce()
@@ -753,14 +789,10 @@ func (sc *serverConn) canonicalHeader(v string) string {
         sc.canonHeader = make(map[string]string)
     }
     cv = http.CanonicalHeaderKey(v)
-    // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
-    // entries in the canonHeader cache. This should be larger than the number
-    // of unique, uncommon header keys likely to be sent by the peer, while not
-    // so high as to permit unreasonable memory usage if the peer sends an unbounded
-    // number of unique header keys.
-    const maxCachedCanonicalHeaders = 32
-    if len(sc.canonHeader) < maxCachedCanonicalHeaders {
+    size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
+    if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
         sc.canonHeader[v] = cv
+        sc.canonHeaderKeysSize += size
     }
     return cv
 }
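
Illustration only: the replacement above bounds the cache by accumulated key bytes rather than by entry count. A hypothetical standalone rendering of that admission check, with the constant copied from the hunk and sample keys invented for the demo:

    package main

    import (
        "fmt"
        "net/http"
    )

    // Constant copied from the hunk above.
    const maxCachedCanonicalHeadersKeysSize = 2048

    func main() {
        cache := map[string]string{}
        size := 0
        for _, k := range []string{"x-trace-id", "x-request-id", "x-b3-spanid"} { // sample keys
            entry := 100 + len(k)*2 // same per-entry estimate the server uses
            if size+entry <= maxCachedCanonicalHeadersKeysSize {
                cache[k] = http.CanonicalHeaderKey(k)
                size += entry
            }
        }
        fmt.Println(cache, size)
    }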
@@ -862,6 +894,7 @@ func (sc *serverConn) serve() {
             {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
             {SettingMaxConcurrentStreams, sc.advMaxStreams},
             {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+            {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
             {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
         },
     })
@@ -869,7 +902,9 @@ func (sc *serverConn) serve() {
 
     // Each connection starts with initialWindowSize inflow tokens.
     // If a higher value is configured, we add more tokens.
-    sc.sendWindowUpdate(nil)
+    if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+        sc.sendWindowUpdate(nil, int(diff))
+    }
 
     if err := sc.readPreface(); err != nil {
         sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
@@ -946,6 +981,8 @@ func (sc *serverConn) serve() {
             }
         case *startPushRequest:
             sc.startPush(v)
+        case func(*serverConn):
+            v(sc)
         default:
             panic(fmt.Sprintf("unexpected type %T", v))
         }
@@ -1459,6 +1496,21 @@ func (sc *serverConn) processFrame(f Frame) error {
         sc.sawFirstSettings = true
     }
 
+    // Discard frames for streams initiated after the identified last
+    // stream sent in a GOAWAY, or all frames after sending an error.
+    // We still need to return connection-level flow control for DATA frames.
+    // RFC 9113 Section 6.8.
+    if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
+
+        if f, ok := f.(*DataFrame); ok {
+            if sc.inflow.available() < int32(f.Length) {
+                return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
+            }
+            sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+        }
+        return nil
+    }
+
     switch f := f.(type) {
     case *SettingsFrame:
         return sc.processSettings(f)
@@ -1501,9 +1553,6 @@ func (sc *serverConn) processPing(f *PingFrame) error {
         // PROTOCOL_ERROR."
         return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
     }
-    if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
-        return nil
-    }
     sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
     return nil
 }
@@ -1565,6 +1614,9 @@ func (sc *serverConn) closeStream(st *stream, err error) {
         panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
     }
     st.state = stateClosed
+    if st.readDeadline != nil {
+        st.readDeadline.Stop()
+    }
     if st.writeDeadline != nil {
         st.writeDeadline.Stop()
     }
@@ -1586,10 +1638,18 @@ func (sc *serverConn) closeStream(st *stream, err error) {
     if p := st.body; p != nil {
         // Return any buffered unread bytes worth of conn-level flow control.
         // See golang.org/issue/16481
-        sc.sendWindowUpdate(nil)
+        sc.sendWindowUpdate(nil, p.Len())
 
         p.CloseWithError(err)
     }
+    if e, ok := err.(StreamError); ok {
+        if e.Cause != nil {
+            err = e.Cause
+        } else {
+            err = errStreamClosed
+        }
+    }
+    st.closeErr = err
     st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
     sc.writeSched.CloseStream(st.id)
 }
@@ -1632,7 +1692,6 @@ func (sc *serverConn) processSetting(s Setting) error {
     }
     switch s.ID {
     case SettingHeaderTableSize:
-        sc.headerTableSize = s.Val
         sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
     case SettingEnablePush:
         sc.pushEnabled = s.Val != 0
@@ -1686,16 +1745,6 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
 func (sc *serverConn) processData(f *DataFrame) error {
     sc.serveG.check()
     id := f.Header().StreamID
-    if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) {
-        // Discard all DATA frames if the GOAWAY is due to an
-        // error, or:
-        //
-        // Section 6.8: After sending a GOAWAY frame, the sender
-        // can discard frames for streams initiated by the
-        // receiver with identifiers higher than the identified
-        // last stream.
-        return nil
-    }
 
     data := f.Data()
     state, st := sc.state(id)
@@ -1734,7 +1783,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
         // sendWindowUpdate, which also schedules sending the
         // frames.
         sc.inflow.take(int32(f.Length))
-        sc.sendWindowUpdate(nil) // conn-level
+        sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 
         if st != nil && st.resetQueued {
             // Already have a stream error in flight. Don't send another.
@@ -1752,7 +1801,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
             return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
         }
         sc.inflow.take(int32(f.Length))
-        sc.sendWindowUpdate(nil) // conn-level
+        sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 
         st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
         // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
@@ -1770,7 +1819,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
     if len(data) > 0 {
         wrote, err := st.body.Write(data)
         if err != nil {
-            sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote))
+            sc.sendWindowUpdate(nil, int(f.Length)-wrote)
             return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
         }
         if wrote != len(data) {
@@ -1838,19 +1887,27 @@ func (st *stream) copyTrailersToHandlerRequest() {
     }
 }
 
+// onReadTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's ReadTimeout has fired.
+func (st *stream) onReadTimeout() {
+    // Wrap the ErrDeadlineExceeded to avoid callers depending on us
+    // returning the bare error.
+    st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+}
+
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's WriteTimeout has fired.
 func (st *stream) onWriteTimeout() {
-    st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
+    st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
+        StreamID: st.id,
+        Code:     ErrCodeInternal,
+        Cause:    os.ErrDeadlineExceeded,
+    }})
 }
 
 func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
     sc.serveG.check()
     id := f.StreamID
-    if sc.inGoAway {
-        // Ignore.
-        return nil
-    }
     // http://tools.ietf.org/html/rfc7540#section-5.1.1
     // Streams initiated by a client MUST use odd-numbered stream
     // identifiers. [...] An endpoint that receives an unexpected
@@ -1953,6 +2010,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
     // (in Go 1.8), though. That's a more sane option anyway.
     if sc.hs.ReadTimeout != 0 {
         sc.conn.SetReadDeadline(time.Time{})
+        if st.body != nil {
+            st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+        }
     }
 
     go sc.runHandler(rw, req, handler)
@@ -2021,9 +2081,6 @@ func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
 }
 
 func (sc *serverConn) processPriority(f *PriorityFrame) error {
-    if sc.inGoAway {
-        return nil
-    }
     if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
         return err
     }
@@ -2322,39 +2379,24 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
 
 func (sc *serverConn) noteBodyRead(st *stream, n int) {
     sc.serveG.check()
-    sc.sendWindowUpdate(nil) // conn-level
+    sc.sendWindowUpdate(nil, n) // conn-level
     if st.state != stateHalfClosedRemote && st.state != stateClosed {
         // Don't send this WINDOW_UPDATE if the stream is closed
         // remotely.
-        sc.sendWindowUpdate(st)
+        sc.sendWindowUpdate(st, n)
     }
 }
 
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream) {
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
     sc.serveG.check()
-
-    var n int32
-    if st == nil {
-        if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 {
-            return
-        } else {
-            n = windowSize - avail
-        }
-    } else {
-        if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 {
-            return
-        } else {
-            n = windowSize - avail
-        }
-    }
     // "The legal range for the increment to the flow control
     // window is 1 to 2^31-1 (2,147,483,647) octets."
     // A Go Read call on 64-bit machines could in theory read
     // a larger Read than this. Very unlikely, but we handle it here
    // rather than elsewhere for now.
     const maxUint31 = 1<<31 - 1
-    for n >= maxUint31 {
+    for n > maxUint31 {
         sc.sendWindowUpdate32(st, maxUint31)
         n -= maxUint31
     }
@@ -2474,7 +2516,15 @@ type responseWriterState struct {
 
 type chunkWriter struct{ rws *responseWriterState }
 
-func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+func (cw chunkWriter) Write(p []byte) (n int, err error) {
+    n, err = cw.rws.writeChunk(p)
+    if err == errStreamClosed {
+        // If writing failed because the stream has been closed,
+        // return the reason it was closed.
+        err = cw.rws.stream.closeErr
+    }
+    return n, err
+}
 
 func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
 
@@ -2668,23 +2718,85 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
     }
 }
 
+func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
+    st := w.rws.stream
+    if !deadline.IsZero() && deadline.Before(time.Now()) {
+        // If we're setting a deadline in the past, reset the stream immediately
+        // so writes after SetWriteDeadline returns will fail.
+        st.onReadTimeout()
+        return nil
+    }
+    w.rws.conn.sendServeMsg(func(sc *serverConn) {
+        if st.readDeadline != nil {
+            if !st.readDeadline.Stop() {
+                // Deadline already exceeded, or stream has been closed.
+                return
+            }
+        }
+        if deadline.IsZero() {
+            st.readDeadline = nil
+        } else if st.readDeadline == nil {
+            st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
+        } else {
+            st.readDeadline.Reset(deadline.Sub(time.Now()))
+        }
+    })
+    return nil
+}
+
+func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
+    st := w.rws.stream
+    if !deadline.IsZero() && deadline.Before(time.Now()) {
+        // If we're setting a deadline in the past, reset the stream immediately
+        // so writes after SetWriteDeadline returns will fail.
+        st.onWriteTimeout()
+        return nil
+    }
+    w.rws.conn.sendServeMsg(func(sc *serverConn) {
+        if st.writeDeadline != nil {
+            if !st.writeDeadline.Stop() {
+                // Deadline already exceeded, or stream has been closed.
+                return
+            }
+        }
+        if deadline.IsZero() {
+            st.writeDeadline = nil
+        } else if st.writeDeadline == nil {
+            st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
+        } else {
+            st.writeDeadline.Reset(deadline.Sub(time.Now()))
+        }
+    })
+    return nil
+}
+
 func (w *responseWriter) Flush() {
+    w.FlushError()
+}
+
+func (w *responseWriter) FlushError() error {
     rws := w.rws
     if rws == nil {
         panic("Header called after Handler finished")
     }
+    var err error
     if rws.bw.Buffered() > 0 {
-        if err := rws.bw.Flush(); err != nil {
-            // Ignore the error. The frame writer already knows.
-            return
-        }
+        err = rws.bw.Flush()
     } else {
         // The bufio.Writer won't call chunkWriter.Write
         // (writeChunk with zero bytes, so we have to do it
         // ourselves to force the HTTP response header and/or
         // final DATA frame (with END_STREAM) to be sent.
-        rws.writeChunk(nil)
+        _, err = chunkWriter{rws}.Write(nil)
+        if err == nil {
+            select {
+            case <-rws.stream.cw:
+                err = rws.stream.closeErr
+            default:
+            }
+        }
     }
+    return err
 }
 
 func (w *responseWriter) CloseNotify() <-chan bool {
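
Note for users (not part of the diff): the new per-stream SetReadDeadline/SetWriteDeadline methods are reachable from a handler by type-asserting the ResponseWriter, since the concrete responseWriter type is unexported. A hedged sketch, with the local interface and durations as illustrative assumptions:

    package main

    import (
        "io"
        "net/http"
        "time"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        type deadliner interface {
            SetReadDeadline(time.Time) error
            SetWriteDeadline(time.Time) error
        }
        if d, ok := w.(deadliner); ok {
            d.SetReadDeadline(time.Now().Add(5 * time.Second))   // bound body reads
            d.SetWriteDeadline(time.Now().Add(10 * time.Second)) // bound response writes
        }
        io.Copy(io.Discard, r.Body)
        w.Write([]byte("ok"))
    }

    func main() {
        // Plain HTTP here only for brevity; the assertion succeeds on
        // HTTP/2 streams served by this package.
        http.ListenAndServe(":8080", http.HandlerFunc(handler))
    }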

vendor/golang.org/x/net/http2/transport.go
@@ -16,6 +16,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "io/fs"
     "log"
     "math"
     mathrand "math/rand"
@@ -117,6 +118,28 @@ type Transport struct {
     // to mean no limit.
     MaxHeaderListSize uint32
 
+    // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the
+    // initial settings frame. It is the size in bytes of the largest frame
+    // payload that the sender is willing to receive. If 0, no setting is
+    // sent, and the value is provided by the peer, which should be 16384
+    // according to the spec:
+    // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2.
+    // Values are bounded in the range 16k to 16M.
+    MaxReadFrameSize uint32
+
+    // MaxDecoderHeaderTableSize optionally specifies the http2
+    // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+    // informs the remote endpoint of the maximum size of the header compression
+    // table used to decode header blocks, in octets. If zero, the default value
+    // of 4096 is used.
+    MaxDecoderHeaderTableSize uint32
+
+    // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+    // header compression table used for encoding request headers. Received
+    // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+    // the default value of 4096 is used.
+    MaxEncoderHeaderTableSize uint32
+
     // StrictMaxConcurrentStreams controls whether the server's
     // SETTINGS_MAX_CONCURRENT_STREAMS should be respected
     // globally. If false, new TCP connections are created to the
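
For reference, a minimal client-side sketch (not from this diff) exercising the three new Transport fields; the sizes and URL are illustrative assumptions, and zero values keep the defaults described above.

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        t := &http2.Transport{
            TLSClientConfig:           &tls.Config{},
            MaxReadFrameSize:          1 << 20,  // ask the peer for up to 1 MiB frames
            MaxDecoderHeaderTableSize: 16 << 10, // advertise a 16 KiB decode table
            MaxEncoderHeaderTableSize: 8 << 10,  // cap our encoder's table at 8 KiB
        }
        c := &http.Client{Transport: t}
        resp, err := c.Get("https://example.com/")
        if err != nil {
            fmt.Println(err)
            return
        }
        resp.Body.Close()
        fmt.Println(resp.Status)
    }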
@@ -170,6 +193,19 @@ func (t *Transport) maxHeaderListSize() uint32 {
     return t.MaxHeaderListSize
 }
 
+func (t *Transport) maxFrameReadSize() uint32 {
+    if t.MaxReadFrameSize == 0 {
+        return 0 // use the default provided by the peer
+    }
+    if t.MaxReadFrameSize < minMaxFrameSize {
+        return minMaxFrameSize
+    }
+    if t.MaxReadFrameSize > maxFrameSize {
+        return maxFrameSize
+    }
+    return t.MaxReadFrameSize
+}
+
 func (t *Transport) disableCompression() bool {
     return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
 }
@@ -292,10 +328,11 @@ type ClientConn struct {
     lastActive      time.Time
     lastIdle        time.Time // time last idle
     // Settings from peer: (also guarded by wmu)
-    maxFrameSize          uint32
-    maxConcurrentStreams  uint32
-    peerMaxHeaderListSize uint64
-    initialWindowSize     uint32
+    maxFrameSize           uint32
+    maxConcurrentStreams   uint32
+    peerMaxHeaderListSize  uint64
+    peerMaxHeaderTableSize uint32
+    initialWindowSize      uint32
 
     // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
     // Write to reqHeaderMu to lock it, read from it to unlock.
@@ -501,6 +538,15 @@ func authorityAddr(scheme string, authority string) (addr string) {
     return net.JoinHostPort(host, port)
 }
 
+var retryBackoffHook func(time.Duration) *time.Timer
+
+func backoffNewTimer(d time.Duration) *time.Timer {
+    if retryBackoffHook != nil {
+        return retryBackoffHook(d)
+    }
+    return time.NewTimer(d)
+}
+
 // RoundTripOpt is like RoundTrip, but takes options.
 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
     if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
@@ -526,11 +572,14 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
             }
             backoff := float64(uint(1) << (uint(retry) - 1))
             backoff += backoff * (0.1 * mathrand.Float64())
+            d := time.Second * time.Duration(backoff)
+            timer := backoffNewTimer(d)
             select {
-            case <-time.After(time.Second * time.Duration(backoff)):
+            case <-timer.C:
                 t.vlogf("RoundTrip retrying after failure: %v", err)
                 continue
             case <-req.Context().Done():
+                timer.Stop()
                 err = req.Context().Err()
             }
         }
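
Aside (not part of the diff): the retry delay itself is unchanged, exponential with up to 10% jitter; only the timer construction moved behind backoffNewTimer so tests can hook it and the timer can be stopped on context cancellation. A small sketch of the same arithmetic:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func main() {
        // Mirrors the delay computed above: 2^(retry-1) seconds plus up to
        // 10% jitter, truncated to whole seconds by the Duration conversion.
        for retry := uint(1); retry <= 4; retry++ {
            backoff := float64(uint(1) << (retry - 1))
            backoff += backoff * (0.1 * rand.Float64())
            d := time.Second * time.Duration(backoff)
            fmt.Println(retry, d)
        }
    }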
@@ -668,6 +717,20 @@ func (t *Transport) expectContinueTimeout() time.Duration {
     return t.t1.ExpectContinueTimeout
 }
 
+func (t *Transport) maxDecoderHeaderTableSize() uint32 {
+    if v := t.MaxDecoderHeaderTableSize; v > 0 {
+        return v
+    }
+    return initialHeaderTableSize
+}
+
+func (t *Transport) maxEncoderHeaderTableSize() uint32 {
+    if v := t.MaxEncoderHeaderTableSize; v > 0 {
+        return v
+    }
+    return initialHeaderTableSize
+}
+
 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
     return t.newClientConn(c, t.disableKeepAlives())
 }
@@ -708,15 +771,19 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
     })
     cc.br = bufio.NewReader(c)
     cc.fr = NewFramer(cc.bw, cc.br)
+    if t.maxFrameReadSize() != 0 {
+        cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
+    }
     if t.CountError != nil {
         cc.fr.countError = t.CountError
     }
-    cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+    maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+    cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
     cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
 
-    // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
-    // henc in response to SETTINGS frames?
     cc.henc = hpack.NewEncoder(&cc.hbuf)
+    cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+    cc.peerMaxHeaderTableSize = initialHeaderTableSize
 
     if t.AllowHTTP {
         cc.nextStreamID = 3
@@ -731,9 +798,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
         {ID: SettingEnablePush, Val: 0},
         {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
     }
+    if max := t.maxFrameReadSize(); max != 0 {
+        initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
+    }
     if max := t.maxHeaderListSize(); max != 0 {
         initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
     }
+    if maxHeaderTableSize != initialHeaderTableSize {
+        initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize})
+    }
 
     cc.bw.Write(clientPreface)
     cc.fr.WriteSettings(initialSettings...)
@@ -1075,7 +1148,7 @@ var errRequestCanceled = errors.New("net/http: request canceled")
 func commaSeparatedTrailers(req *http.Request) (string, error) {
     keys := make([]string, 0, len(req.Trailer))
     for k := range req.Trailer {
-        k = http.CanonicalHeaderKey(k)
+        k = canonicalHeader(k)
         switch k {
         case "Transfer-Encoding", "Trailer", "Content-Length":
             return "", fmt.Errorf("invalid Trailer key %q", k)
@@ -1612,7 +1685,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
 
     var sawEOF bool
     for !sawEOF {
-        n, err := body.Read(buf[:len(buf)])
+        n, err := body.Read(buf)
         if hasContentLen {
             remainLen -= int64(n)
             if remainLen == 0 && err == nil {
@@ -1915,7 +1988,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 
     // Header list size is ok. Write the headers.
     enumerateHeaders(func(name, value string) {
-        name, ascii := asciiToLower(name)
+        name, ascii := lowerHeader(name)
         if !ascii {
             // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
             // field names have to be ASCII characters (just as in HTTP/1.x).
@@ -1968,7 +2041,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
     }
 
     for k, vv := range trailer {
-        lowKey, ascii := asciiToLower(k)
+        lowKey, ascii := lowerHeader(k)
         if !ascii {
             // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
             // field names have to be ASCII characters (just as in HTTP/1.x).
@@ -2301,7 +2374,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
         Status:     status + " " + http.StatusText(statusCode),
     }
     for _, hf := range regularFields {
-        key := http.CanonicalHeaderKey(hf.Name)
+        key := canonicalHeader(hf.Name)
         if key == "Trailer" {
             t := res.Trailer
             if t == nil {
@@ -2309,7 +2382,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
                 res.Trailer = t
             }
             foreachHeaderElement(hf.Value, func(v string) {
-                t[http.CanonicalHeaderKey(v)] = nil
+                t[canonicalHeader(v)] = nil
             })
         } else {
             vv := header[key]
@@ -2414,7 +2487,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
 
     trailer := make(http.Header)
     for _, hf := range f.RegularFields() {
-        key := http.CanonicalHeaderKey(hf.Name)
+        key := canonicalHeader(hf.Name)
         trailer[key] = append(trailer[key], hf.Value)
     }
     cs.trailer = trailer
@@ -2760,8 +2833,10 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
             cc.cond.Broadcast()
 
             cc.initialWindowSize = s.Val
+        case SettingHeaderTableSize:
+            cc.henc.SetMaxDynamicTableSize(s.Val)
+            cc.peerMaxHeaderTableSize = s.Val
         default:
-            // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
             cc.vlogf("Unhandled Setting: %v", s)
         }
         return nil
@@ -2985,7 +3060,11 @@ func (gz *gzipReader) Read(p []byte) (n int, err error) {
 }
 
 func (gz *gzipReader) Close() error {
-    return gz.body.Close()
+    if err := gz.body.Close(); err != nil {
+        return err
+    }
+    gz.zerr = fs.ErrClosed
+    return nil
 }
 
 type errorReader struct{ err error }

vendor/golang.org/x/sys/execabs/execabs_go119.go
@@ -7,9 +7,11 @@
 
 package execabs
 
-import "strings"
+import (
+    "errors"
+    "os/exec"
+)
 
 func isGo119ErrDot(err error) bool {
-    // TODO: return errors.Is(err, exec.ErrDot)
-    return strings.Contains(err.Error(), "current directory")
+    return errors.Is(err, exec.ErrDot)
 }
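
Context (not part of the diff): with Go 1.19+ os/exec reports path lookups that resolve via the current directory as exec.ErrDot, so the old text-matching fallback can go. A tiny sketch of why errors.Is is the more robust check; the wrapping message is invented for the demo:

    package main

    import (
        "errors"
        "fmt"
        "os/exec"
    )

    func main() {
        // errors.Is matches exec.ErrDot even when it is wrapped, which the
        // previous strings.Contains check on the error text could not do
        // reliably.
        err := fmt.Errorf("looking up helper: %w", exec.ErrDot)
        fmt.Println(errors.Is(err, exec.ErrDot)) // true
    }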

vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -367,6 +367,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys   IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode
 //sys   IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible
 //sys   GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo
+//sys   GetLargePageMinimum() (size uintptr)
 
 // Volume Management Functions
 //sys   DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW

vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -252,6 +252,7 @@ var (
     procGetFileType               = modkernel32.NewProc("GetFileType")
     procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW")
     procGetFullPathNameW          = modkernel32.NewProc("GetFullPathNameW")
+    procGetLargePageMinimum       = modkernel32.NewProc("GetLargePageMinimum")
     procGetLastError              = modkernel32.NewProc("GetLastError")
     procGetLogicalDriveStringsW   = modkernel32.NewProc("GetLogicalDriveStringsW")
     procGetLogicalDrives          = modkernel32.NewProc("GetLogicalDrives")
@@ -2180,6 +2181,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (
     return
 }
 
+func GetLargePageMinimum() (size uintptr) {
+    r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0)
+    size = uintptr(r0)
+    return
+}
+
 func GetLastError() (lasterr error) {
     r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
     if r0 != 0 {
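
Usage sketch (illustrative, Windows-only, not part of the diff) for the newly exposed wrapper:

    //go:build windows

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    func main() {
        // Returns 0 when the processor does not support large pages.
        fmt.Println("large page minimum:", windows.GetLargePageMinimum())
    }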

vendor/golang.org/x/term/terminal.go
@@ -233,7 +233,6 @@ func (t *Terminal) queue(data []rune) {
     t.outBuf = append(t.outBuf, []byte(string(data))...)
 }
 
-var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
 var space = []rune{' '}
 
 func isPrintable(key rune) bool {

vendor/golang.org/x/text/unicode/bidi/core.go
@@ -37,18 +37,6 @@ const (
     unknownClass = ^Class(0)
 )
 
-var controlToClass = map[rune]Class{
-    0x202D: LRO, // LeftToRightOverride,
-    0x202E: RLO, // RightToLeftOverride,
-    0x202A: LRE, // LeftToRightEmbedding,
-    0x202B: RLE, // RightToLeftEmbedding,
-    0x202C: PDF, // PopDirectionalFormat,
-    0x2066: LRI, // LeftToRightIsolate,
-    0x2067: RLI, // RightToLeftIsolate,
-    0x2068: FSI, // FirstStrongIsolate,
-    0x2069: PDI, // PopDirectionalIsolate,
-}
-
 // A trie entry has the following bits:
 //    7..5    XOR mask for brackets
 //    4       1: Bracket open, 0: Bracket close

vendor/modules.txt
@@ -12,7 +12,7 @@ github.com/beorn7/perks/quantile
 # github.com/cespare/xxhash/v2 v2.1.2
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/containerd/containerd v1.6.10
+# github.com/containerd/containerd v1.6.14
 ## explicit; go 1.17
 github.com/containerd/containerd/errdefs
 github.com/containerd/containerd/log
@@ -39,7 +39,7 @@ github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
 github.com/docker/distribution/uuid
-# github.com/docker/docker v23.0.0-beta.1+incompatible
+# github.com/docker/docker v23.0.0-beta.1.0.20221221173850-cba986b34090+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -135,8 +135,8 @@ github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.1
 ## explicit; go 1.18
 github.com/inconshreveable/mousetrap
-# github.com/klauspost/compress v1.15.9
-## explicit; go 1.16
+# github.com/klauspost/compress v1.15.12
+## explicit; go 1.17
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -163,7 +163,7 @@ github.com/moby/buildkit/util/appcontext
 # github.com/moby/patternmatcher v0.5.0
 ## explicit; go 1.19
 github.com/moby/patternmatcher
-# github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054
+# github.com/moby/swarmkit/v2 v2.0.0-20221215132206-0da442b2780f
 ## explicit; go 1.18
 github.com/moby/swarmkit/v2/api
 github.com/moby/swarmkit/v2/api/deepcopy
@@ -194,18 +194,18 @@ github.com/opencontainers/go-digest
 ## explicit; go 1.16
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.2
+# github.com/opencontainers/runc v1.1.3
 ## explicit; go 1.16
 github.com/opencontainers/runc/libcontainer/user
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/prometheus/client_golang v1.13.0
+# github.com/prometheus/client_golang v1.14.0
 ## explicit; go 1.17
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.2.0
+# github.com/prometheus/client_model v0.3.0
 ## explicit; go 1.9
 github.com/prometheus/client_model/go
 # github.com/prometheus/common v0.37.0
@@ -261,11 +261,11 @@ github.com/xeipuuv/gojsonschema
 # go.etcd.io/etcd/raft/v3 v3.5.6
 ## explicit; go 1.16
 go.etcd.io/etcd/raft/v3/raftpb
-# golang.org/x/crypto v0.1.0
+# golang.org/x/crypto v0.2.0
 ## explicit; go 1.17
 golang.org/x/crypto/ed25519
 golang.org/x/crypto/pbkdf2
-# golang.org/x/net v0.1.0
+# golang.org/x/net v0.4.0
 ## explicit; go 1.17
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
@@ -275,17 +275,17 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/sys v0.2.0
+# golang.org/x/sys v0.3.0
 ## explicit; go 1.17
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.1.0
+# golang.org/x/term v0.3.0
 ## explicit; go 1.17
 golang.org/x/term
-# golang.org/x/text v0.4.0
+# golang.org/x/text v0.5.0
 ## explicit; go 1.17
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform