Merge pull request #4897 from thaJeztah/bump_engine

vendor: github.com/docker/docker c70d7905fbd9 (v26.0.0-dev)
Sebastiaan van Stijn 2024-02-26 19:24:28 +01:00 committed by GitHub
commit efb9206433
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
130 changed files with 3481 additions and 1470 deletions


@@ -8,11 +8,11 @@ go 1.19
 require (
 	dario.cat/mergo v1.0.0
-	github.com/containerd/containerd v1.7.12
+	github.com/containerd/containerd v1.7.13
 	github.com/creack/pty v1.1.21
 	github.com/distribution/reference v0.5.0
 	github.com/docker/distribution v2.8.3+incompatible
-	github.com/docker/docker v25.0.1-0.20240209212242-86b86412a1b7+incompatible // master (v26.0.0-dev)
+	github.com/docker/docker v25.0.1-0.20240226093800-c70d7905fbd9+incompatible // master (v26.0.0-dev)
 	github.com/docker/docker-credential-helpers v0.8.1
 	github.com/docker/go-connections v0.5.0
 	github.com/docker/go-units v0.5.0
@@ -57,7 +57,7 @@ require (
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
@@ -68,24 +68,24 @@ require (
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/sys/symlink v0.2.0 // indirect
 	github.com/moby/sys/user v0.1.0 // indirect
-	github.com/prometheus/client_golang v1.14.0 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
+	github.com/prometheus/client_golang v1.17.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
-	go.opentelemetry.io/otel v1.19.0 // indirect
-	go.opentelemetry.io/otel/metric v1.19.0 // indirect
-	go.opentelemetry.io/otel/trace v1.19.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
+	go.opentelemetry.io/otel v1.21.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
 	golang.org/x/crypto v0.17.0 // indirect
 	golang.org/x/mod v0.14.0 // indirect
 	golang.org/x/net v0.19.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.16.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
-	google.golang.org/grpc v1.58.3 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
+	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 )


@@ -36,8 +36,8 @@ github.com/cloudflare/cfssl v1.6.4 h1:NMOvfrEjFfC63K3SGXgAnFdsgkmiq4kATme5BfcqrO
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
-github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
+github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=
+github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@@ -54,8 +54,8 @@ github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v25.0.1-0.20240209212242-86b86412a1b7+incompatible h1:86+mY7JmJekvP67j6fyDrjjlKuMnkgJMQr1gC7b0Bmc=
-github.com/docker/docker v25.0.1-0.20240209212242-86b86412a1b7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v25.0.1-0.20240226093800-c70d7905fbd9+incompatible h1:f+nMTocctRJkIsKEGp/IDFaOJ0arCVGW+ptwcfkXaLQ=
+github.com/docker/docker v25.0.1-0.20240226093800-c70d7905fbd9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
 github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -84,8 +84,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
@@ -102,7 +102,6 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
@@ -143,8 +142,8 @@ github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW
 github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -209,26 +208,27 @@ github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -275,17 +275,17 @@ github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0=
 go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
 go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50=
 go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
-go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
-go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
+go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
+go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
-go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
-go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
+go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
 go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
-go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
-go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
+go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
+go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
@@ -355,13 +355,13 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g=
-google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
 google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
-google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=


@@ -1746,7 +1746,7 @@ definitions:
       Created:
         description: |
           Date and time at which the image was created, formatted in
-          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds, or empty if the field was not set in the image config.
         type: "string"
         x-nullable: false
         example: "2022-02-04T21:20:12.497794809Z"


@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"sync"
 
 	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/progress"
@@ -109,6 +110,7 @@ type progressOutput struct {
 	sf       formatProgress
 	out      io.Writer
 	newLines bool
+	mu       sync.Mutex
 }
 
 // WriteProgress formats progress information from a ProgressReader.
@@ -120,6 +122,9 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error {
 		jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units}
 		formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux)
 	}
+
+	out.mu.Lock()
+	defer out.mu.Unlock()
 	_, err := out.out.Write(formatted)
 	if err != nil {
 		return err
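
The new mutex serializes concurrent WriteProgress calls on a shared progressOutput, so two goroutines can no longer interleave bytes inside a single JSON progress message. A minimal sketch of that situation, assuming the package's exported NewJSONProgressOutput constructor (the layer IDs and byte counts are illustrative):

```go
package main

import (
	"os"
	"sync"

	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	// One progress.Output shared by several goroutines, as when multiple
	// image layers report download progress concurrently.
	out := streamformatter.NewJSONProgressOutput(os.Stdout, false)

	var wg sync.WaitGroup
	for _, id := range []string{"layer1", "layer2", "layer3"} {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			for cur := int64(0); cur <= 100; cur += 50 {
				// Each call now takes progressOutput.mu around the Write,
				// keeping every JSON message intact on the stream.
				_ = out.WriteProgress(progress.Progress{
					ID: id, Action: "Downloading", Current: cur, Total: 100,
				})
			}
		}(id)
	}
	wg.Wait()
}
```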


@@ -1,6 +1,7 @@
 # A minimal logging API for Go
 
 [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
 
 logr offers an(other) opinion on how Go programs and libraries can do logging
 without becoming coupled to a particular logging implementation. This is not
@@ -73,6 +74,29 @@ received:
 If the Go standard library had defined an interface for logging, this project
 probably would not be needed. Alas, here we are.
 
+When the Go developers started developing such an interface with
+[slog](https://github.com/golang/go/issues/56345), they adopted some of the
+logr design but also left out some parts and changed others:
+
+| Feature | logr | slog |
+|---------|------|------|
+| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
+| Low-level API | `LogSink` | `Handler` |
+| Stack unwinding | done by `LogSink` | done by `Logger` |
+| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
+| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
+| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
+| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
+| Passing logger via context | `NewContext`, `FromContext` | no API |
+| Adding a name to a logger | `WithName` | no API |
+| Modify verbosity of log entries in a call chain | `V` | no API |
+| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+
+The high-level slog API is explicitly meant to be one of many different APIs
+that can be layered on top of a shared `slog.Handler`. logr is one such
+alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
+package.
+
 ### Inspiration
 
 Before you consider this package, please read [this blog post by the
@@ -118,6 +142,91 @@ There are implementations for the following logging libraries:
 - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
 - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
 
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
+`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API. `slogr` itself leaves that to the caller.
+
+## Using a `logr.Sink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.Sink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+  through `slog.Logger`, but may be wrong in other cases. That's because a
+  `logr.Sink` does its own stack unwinding instead of using the program counter
+  provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+  loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
+  used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+  because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+  pair with the group names, separated by a dot. For structured output like
+  JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+## Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+  by negating them.
+- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
+  counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+  attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+  and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.Valuer` is sufficient.
+
+## Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
+to fill this gap:
+
+    func HandlerFromContext(ctx context.Context) slog.Handler {
+        logger, err := logr.FromContext(ctx)
+        if err == nil {
+            return slogr.NewSlogHandler(logger)
+        }
+        return slog.Default().Handler()
+    }
+
+    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
+        return logr.NewContext(ctx, slogr.NewLogr(handler))
+    }
+
+The downside is that storing and retrieving a `slog.Handler` needs more
+allocations compared to using a `logr.Logger`. Therefore the recommendation is
+to use the `logr.Logger` API in code which uses contextual logging.
+
 ## FAQ
 
 ### Conceptual
@@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this",
 
 Then gradually choose levels in between as you need them, working your way
 down from 10 (for debug and trace style logs) and up from 1 (for chattier
-info-type logs.)
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
 
 #### How do I choose my keys?
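
The interoperability section added above can be exercised with a short round trip. A minimal sketch against the `slogr` package this release ships (requires Go 1.21+; the handler choice, names, and key/value pairs are illustrative):

```go
package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/go-logr/logr/slogr"
)

func main() {
	// slog.Handler -> logr.Logger: logr level n is emitted at slog level -n,
	// so a handler threshold of slog.Level(-4) lets logr V(0)..V(4) through.
	h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(-4)})
	logger := slogr.NewLogr(h).WithName("pkg").WithName("sub")

	logger.V(4).Info("debug detail")                   // DEBUG, logger="pkg/sub"
	logger.Error(errors.New("boom"), "request failed") // ERROR, err="boom"

	// logr.Logger -> slog.Handler: the reverse direction, wrapped in the
	// high-level slog API via slog.New.
	slogger := slog.New(slogr.NewSlogHandler(logger))
	slogger.Info("hello from slog", "answer", 42)
}
```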

vendor/github.com/go-logr/logr/SECURITY.md generated vendored (new file, 18 lines)

@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to go-logr-security@googlegroups.com
+- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+We ask that you give us 90 days to work on a fix before public exposure.


@@ -116,17 +116,17 @@ type Options struct {
 	// Equivalent hooks are offered for key-value pairs saved via
 	// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
 	// for user-provided pairs (see RenderArgsHook).
-	RenderBuiltinsHook func(kvList []interface{}) []interface{}
+	RenderBuiltinsHook func(kvList []any) []any
 
 	// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
 	// only called for key-value pairs saved via logr.Logger.WithValues. See
 	// RenderBuiltinsHook for more details.
-	RenderValuesHook func(kvList []interface{}) []interface{}
+	RenderValuesHook func(kvList []any) []any
 
 	// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
 	// called for key-value pairs passed directly to Info and Error. See
 	// RenderBuiltinsHook for more details.
-	RenderArgsHook func(kvList []interface{}) []interface{}
+	RenderArgsHook func(kvList []any) []any
 
 	// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
 	// that contains a struct, etc.) it may log. Every time it finds a struct,
@@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink {
 	return &l
 }
 
-func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
 	l.Formatter.AddValues(kvList)
 	return &l
 }
@@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
 	return &l
 }
 
-func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
 	prefix, args := l.FormatInfo(level, msg, kvList)
 	l.write(prefix, args)
 }
 
-func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
 	prefix, args := l.FormatError(err, msg, kvList)
 	l.write(prefix, args)
 }
@@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
 type Formatter struct {
 	outputFormat outputFormat
 	prefix       string
-	values       []interface{}
+	values       []any
 	valuesStr    string
 	depth        int
 	opts         *Options
@@ -246,10 +246,10 @@ const (
 )
 
 // PseudoStruct is a list of key-value pairs that gets logged as a struct.
-type PseudoStruct []interface{}
+type PseudoStruct []any
 
 // render produces a log line, ready to use.
-func (f Formatter) render(builtins, args []interface{}) string {
+func (f Formatter) render(builtins, args []any) string {
 	// Empirically bytes.Buffer is faster than strings.Builder for this.
 	buf := bytes.NewBuffer(make([]byte, 0, 1024))
 	if f.outputFormat == outputJSON {
@@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string {
 // This function returns a potentially modified version of kvList, which
 // ensures that there is a value for every key (adding a value if needed) and
 // that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
 	// This logic overlaps with sanitize() but saves one type-cast per key,
 	// which can be measurable.
 	if len(kvList)%2 != 0 {
@@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
 	return kvList
 }
 
-func (f Formatter) pretty(value interface{}) string {
+func (f Formatter) pretty(value any) string {
 	return f.prettyWithFlags(value, 0, 0)
 }
 
@@ -343,7 +343,7 @@ const (
 )
 
 // TODO: This is not fast. Most of the overhead goes here.
-func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
 	if depth > f.opts.MaxLogDepth {
 		return `"<max-log-depth-exceeded>"`
 	}
@@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool {
 	return false
 }
 
-func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+func invokeMarshaler(m logr.Marshaler) (ret any) {
 	defer func() {
 		if r := recover(); r != nil {
 			ret = fmt.Sprintf("<panic: %s>", r)
@@ -675,12 +675,12 @@ func (f Formatter) caller() Caller {
 
 const noValue = "<no-value>"
 
-func (f Formatter) nonStringKey(v interface{}) string {
+func (f Formatter) nonStringKey(v any) string {
 	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
 }
 
 // snippet produces a short snippet string of an arbitrary value.
-func (f Formatter) snippet(v interface{}) string {
+func (f Formatter) snippet(v any) string {
 	const snipLen = 16
 
 	snip := f.pretty(v)
@@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string {
 // sanitize ensures that a list of key-value pairs has a value for every key
 // (adding a value if needed) and that each key is a string (substituting a key
 // if needed).
-func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+func (f Formatter) sanitize(kvList []any) []any {
 	if len(kvList)%2 != 0 {
 		kvList = append(kvList, noValue)
 	}
@@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int {
 // FormatInfo renders an Info log message into strings. The prefix will be
 // empty when no names were set (via AddNames), or when the output is
 // configured for JSON.
-func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
-	args := make([]interface{}, 0, 64) // using a constant here impacts perf
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+	args := make([]any, 0, 64) // using a constant here impacts perf
 	prefix = f.prefix
 	if f.outputFormat == outputJSON {
 		args = append(args, "logger", prefix)
@@ -745,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref
 }
 
 // FormatError renders an Error log message into strings. The prefix will be
 // empty when no names were set (via AddNames), or when the output is
 // configured for JSON.
-func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
-	args := make([]interface{}, 0, 64) // using a constant here impacts perf
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+	args := make([]any, 0, 64) // using a constant here impacts perf
 	prefix = f.prefix
 	if f.outputFormat == outputJSON {
 		args = append(args, "logger", prefix)
@@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre
 		args = append(args, "caller", f.caller())
 	}
 	args = append(args, "msg", msg)
-	var loggableErr interface{}
+	var loggableErr any
 	if err != nil {
 		loggableErr = err.Error()
 	}
 	args = append(args, "error", loggableErr)
-	return f.prefix, f.render(args, kvList)
+	return prefix, f.render(args, kvList)
 }
 
 // AddName appends the specified name. funcr uses '/' characters to separate
@@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) {
 
 // AddValues adds key-value pairs to the set of saved values to be logged with
 // each log line.
-func (f *Formatter) AddValues(kvList []interface{}) {
+func (f *Formatter) AddValues(kvList []any) {
 	// Three slice args forces a copy.
 	n := len(f.values)
 	f.values = append(f.values[:n:n], kvList...)


@@ -127,9 +127,9 @@ limitations under the License.
 // such a value can call its methods without having to check whether the
 // instance is ready for use.
 //
-// Calling methods with the null logger (Logger{}) as instance will crash
-// because it has no LogSink. Therefore this null logger should never be passed
-// around. For cases where passing a logger is optional, a pointer to Logger
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call it, the methods
+// will never crash. For cases where passing a logger is optional, a pointer to Logger
 // should be used.
 //
 // # Key Naming Conventions
@@ -258,6 +258,12 @@ type Logger struct {
 // Enabled tests whether this Logger is enabled. For example, commandline
 // flags might be used to set the logging verbosity and disable some info logs.
 func (l Logger) Enabled() bool {
+	// Some implementations of LogSink look at the caller in Enabled (e.g.
+	// different verbosity levels per package or file), but we only pass one
+	// CallDepth in (via Init). This means that all calls from Logger to the
+	// LogSink's Enabled, Info, and Error methods must have the same number of
+	// frames. In other words, Logger methods can't call other Logger methods
+	// which call these LogSink methods unless we do it the same in all paths.
 	return l.sink != nil && l.sink.Enabled(l.level)
 }
@@ -267,11 +273,11 @@ func (l Logger) Enabled() bool {
 // line. The key/value pairs can then be used to add additional variable
 // information. The key/value pairs must alternate string keys and arbitrary
 // values.
-func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+func (l Logger) Info(msg string, keysAndValues ...any) {
 	if l.sink == nil {
 		return
 	}
-	if l.Enabled() {
+	if l.sink.Enabled(l.level) { // see comment in Enabled
 		if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
 			withHelper.GetCallStackHelper()()
 		}
@@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
 // while the err argument should be used to attach the actual error that
 // triggered this log line, if present. The err parameter is optional
 // and nil may be passed instead of an error instance.
-func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+func (l Logger) Error(err error, msg string, keysAndValues ...any) {
 	if l.sink == nil {
 		return
 	}
@@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger {
 	return l
 }
 
+// GetV returns the verbosity level of the logger. If the logger's LogSink is
+// nil as in the Discard logger, this will always return 0.
+func (l Logger) GetV() int {
+	// 0 if l.sink nil because of the if check in V above.
+	return l.level
+}
+
 // WithValues returns a new Logger instance with additional key/value pairs.
 // See Info for documentation on how key/value pairs work.
-func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+func (l Logger) WithValues(keysAndValues ...any) Logger {
 	if l.sink == nil {
 		return l
 	}
@@ -467,15 +480,15 @@ type LogSink interface {
 	// The level argument is provided for optional logging. This method will
 	// only be called when Enabled(level) is true. See Logger.Info for more
 	// details.
-	Info(level int, msg string, keysAndValues ...interface{})
+	Info(level int, msg string, keysAndValues ...any)
 
 	// Error logs an error, with the given message and key/value pairs as
 	// context. See Logger.Error for more details.
-	Error(err error, msg string, keysAndValues ...interface{})
+	Error(err error, msg string, keysAndValues ...any)
 
 	// WithValues returns a new LogSink with additional key/value pairs. See
 	// Logger.WithValues for more details.
-	WithValues(keysAndValues ...interface{}) LogSink
+	WithValues(keysAndValues ...any) LogSink
 
 	// WithName returns a new LogSink with the specified name appended. See
 	// Logger.WithName for more details.
@@ -546,5 +559,5 @@ type Marshaler interface {
 	// with exported fields
 	//
 	// It may return any value of any type.
-	MarshalLog() interface{}
+	MarshalLog() any
 }
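
The reworded package docs above promote the zero Logger to a safe no-op equivalent to Discard(), and GetV is new in this release. A minimal sketch of both, using only the vendored API:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
)

func main() {
	// The zero Logger has no LogSink; per the updated doc comment it
	// behaves like Discard() instead of crashing.
	var log logr.Logger
	log.Info("silently dropped")
	log.V(2).Info("also safe")

	fmt.Println(log.Enabled())   // false: no sink is set
	fmt.Println(log.V(3).GetV()) // 0: V() is a no-op without a sink
}
```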


@@ -20,6 +20,7 @@ import (
 	"time"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // Counter is a Metric that represents a single numerical value that only ever
@@ -59,6 +60,18 @@ type ExemplarAdder interface {
 // CounterOpts is an alias for Opts. See there for doc comments.
 type CounterOpts Opts
 
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+	CounterOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewCounter creates a new Counter based on the provided CounterOpts.
 //
 // The returned implementation also implements ExemplarAdder. It is safe to
@@ -78,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter {
 		nil,
 		opts.ConstLabels,
 	)
-	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
 	result.init(result) // Init self-collection.
+	result.createdTs = timestamppb.New(opts.now())
 	return result
 }
@@ -94,10 +111,12 @@ type counter struct {
 	selfCollector
 	desc *Desc
 
+	createdTs  *timestamppb.Timestamp
 	labelPairs []*dto.LabelPair
 	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
 
-	now func() time.Time // To mock out time.Now() for testing.
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
 }
 
 func (c *counter) Desc() *Desc {
@@ -147,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error {
 		exemplar = e.(*dto.Exemplar)
 	}
 	val := c.get()
-
-	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
 }
 
 func (c *counter) updateExemplar(v float64, l Labels) {
@@ -174,19 +192,31 @@ type CounterVec struct {
 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
 // partitioned by the given label names.
 func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
-	desc := NewDesc(
+	return V2.NewCounterVec(CounterVecOpts{
+		CounterOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
+	if opts.now == nil {
+		opts.now = time.Now
+	}
 	return &CounterVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
 			}
-			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
+			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
 			result.init(result) // Init self-collection.
+			result.createdTs = timestamppb.New(opts.now())
 			return result
 		}),
 	}
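
The new V2.NewCounterVec path above takes CounterVecOpts, whose VariableLabels accept an optional per-label normalization. A hedged sketch of the call (the metric name, label, and constraint are illustrative; ConstrainedLabels is the companion helper type shipped alongside UnconstrainedLabels in this release):

```go
package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests.",
		},
		// Each observed value of "method" is normalized through the
		// Constraint function before the child counter is looked up.
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToUpper},
		},
	})
	prometheus.MustRegister(requests)

	requests.WithLabelValues("get").Inc() // stored as method="GET"
}
```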


@ -14,20 +14,16 @@
package prometheus package prometheus
import ( import (
"errors"
"fmt" "fmt"
"sort" "sort"
"strings" "strings"
"github.com/cespare/xxhash/v2" "github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal" "github.com/prometheus/client_golang/prometheus/internal"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
) )
// Desc is the descriptor used by every Prometheus Metric. It is essentially // Desc is the descriptor used by every Prometheus Metric. It is essentially
@ -54,9 +50,9 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on // constLabelPairs contains precalculated DTO label pairs based on
// the constant labels. // the constant labels.
constLabelPairs []*dto.LabelPair constLabelPairs []*dto.LabelPair
// variableLabels contains names of labels for which the metric // variableLabels contains names of labels and normalization function for
// maintains variable values. // which the metric maintains variable values.
variableLabels []string variableLabels *compiledLabels
// id is a hash of the values of the ConstLabels and fqName. This // id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be // must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor. // used as an identifier of the descriptor.
@ -80,10 +76,24 @@ type Desc struct {
// For constLabels, the label values are constant. Therefore, they are fully // For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern. // specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
}
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported at registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names and normalization functions. Their
// label values are variable and therefore not part of the Desc. (They are managed
// within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
d := &Desc{ d := &Desc{
fqName: fqName, fqName: fqName,
help: help, help: help,
variableLabels: variableLabels, variableLabels: variableLabels.compile(),
} }
if !model.IsValidMetricName(model.LabelValue(fqName)) { if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName) d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// their sorted label names) plus the fqName (at position 0). // their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1) labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
labelNameSet := map[string]struct{}{} labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them... // First add only the const label names and sort them...
for labelName := range constLabels { for labelName := range constLabels {
@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// Now add the variable label names, but prefix them with something that // Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label // cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels. // dimension with a different mix between preset and variable labels.
for _, labelName := range variableLabels { for _, label := range d.variableLabels.names {
if !checkLabelName(labelName) { if !checkLabelName(label) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
return d return d
} }
labelNames = append(labelNames, "$"+labelName) labelNames = append(labelNames, "$"+label)
labelNameSet[labelName] = struct{}{} labelNameSet[label] = struct{}{}
} }
if len(labelNames) != len(labelNameSet) { if len(labelNames) != len(labelNameSet) {
d.err = errors.New("duplicate label names") d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
return d return d
} }
@ -179,11 +189,19 @@ func (d *Desc) String() string {
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
) )
} }
vlStrings := make([]string, 0, len(d.variableLabels.names))
for _, vl := range d.variableLabels.names {
if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
} else {
vlStrings = append(vlStrings, vl)
}
}
return fmt.Sprintf( return fmt.Sprintf(
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
d.fqName, d.fqName,
d.help, d.help,
strings.Join(lpStrings, ","), strings.Join(lpStrings, ","),
d.variableLabels, strings.Join(vlStrings, ","),
) )
} }
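
A hedged sketch of the v2 Desc constructor and the new String rendering of constrained labels (names invented for illustration):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.V2.NewDesc(
		"example_http_requests_total",
		"Example Desc with one constrained label.",
		prometheus.ConstrainedLabels{
			{Name: "code"},
			{Name: "method", Constraint: strings.ToLower},
		},
		prometheus.Labels{"service": "example"},
	)
	// Constrained labels are rendered as c(<name>), so variableLabels
	// prints as {code,c(method)}.
	fmt.Println(desc)
}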

View File

@ -37,35 +37,35 @@
// //
// type metrics struct { // type metrics struct {
// cpuTemp prometheus.Gauge // cpuTemp prometheus.Gauge
// hdFailures *prometheus.CounterVec // hdFailures *prometheus.CounterVec
// } // }
// //
// func NewMetrics(reg prometheus.Registerer) *metrics { // func NewMetrics(reg prometheus.Registerer) *metrics {
// m := &metrics{ // m := &metrics{
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ // cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius", // Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.", // Help: "Current temperature of the CPU.",
// }), // }),
// hdFailures: prometheus.NewCounterVec( // hdFailures: prometheus.NewCounterVec(
// prometheus.CounterOpts{ // prometheus.CounterOpts{
// Name: "hd_errors_total", // Name: "hd_errors_total",
// Help: "Number of hard-disk errors.", // Help: "Number of hard-disk errors.",
// }, // },
// []string{"device"}, // []string{"device"},
// ), // ),
// } // }
// reg.MustRegister(m.cpuTemp) // reg.MustRegister(m.cpuTemp)
// reg.MustRegister(m.hdFailures) // reg.MustRegister(m.hdFailures)
// return m // return m
// } // }
// //
// func main() { // func main() {
// // Create a non-global registry. // // Create a non-global registry.
// reg := prometheus.NewRegistry() // reg := prometheus.NewRegistry()
// //
// // Create new metrics and register them using the custom registry. // // Create new metrics and register them using the custom registry.
// m := NewMetrics(reg) // m := NewMetrics(reg)
// // Set values for the new created metrics. // // Set values for the new created metrics.
// m.cpuTemp.Set(65.3) // m.cpuTemp.Set(65.3)
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() // m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
// //

View File

@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) {
continue continue
} }
var v interface{} var v interface{}
labels := make([]string, len(desc.variableLabels)) labels := make([]string, len(desc.variableLabels.names))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err) ch <- NewInvalidMetric(desc, err)
continue continue

View File

@ -55,6 +55,18 @@ type Gauge interface {
// GaugeOpts is an alias for Opts. See there for doc comments. // GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts type GaugeOpts Opts
// GaugeVecOpts bundles the options to create a GaugeVec metric.
// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type GaugeVecOpts struct {
GaugeOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
// NewGauge creates a new Gauge based on the provided GaugeOpts. // NewGauge creates a new Gauge based on the provided GaugeOpts.
// //
// The returned implementation is optimized for a fast Set method. If you have a // The returned implementation is optimized for a fast Set method. If you have a
@ -123,7 +135,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error { func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, nil, out) return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
} }
// GaugeVec is a Collector that bundles a set of Gauges that all share the same // GaugeVec is a Collector that bundles a set of Gauges that all share the same
@ -138,16 +150,24 @@ type GaugeVec struct {
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names. // partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc( return V2.NewGaugeVec(GaugeVecOpts{
GaugeOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help, opts.Help,
labelNames, opts.VariableLabels,
opts.ConstLabels, opts.ConstLabels,
) )
return &GaugeVec{ return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) { if len(lvs) != len(desc.variableLabels.names) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
} }
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
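
The gauge constructor follows the same v2 pattern; a brief sketch under the same caveats (invented names):

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	temp := prometheus.V2.NewGaugeVec(prometheus.GaugeVecOpts{
		GaugeOpts: prometheus.GaugeOpts{
			Name: "example_temperature_celsius",
			Help: "Example gauge with a constrained label.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "sensor", Constraint: strings.ToUpper},
		},
	})
	prometheus.MustRegister(temp)
	temp.WithLabelValues("cpu0").Set(65.3) // stored as sensor="CPU0"
}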

View File

@ -23,11 +23,10 @@ import (
"strings" "strings"
"sync" "sync"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal" "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
) )
const ( const (

View File

@ -22,10 +22,10 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
) )
// nativeHistogramBounds for the frac of observed values. Only relevant for // nativeHistogramBounds for the frac of observed values. Only relevant for
@ -392,7 +392,7 @@ type HistogramOpts struct {
// zero, it is replaced by default buckets. The default buckets are // zero, it is replaced by default buckets. The default buckets are
// DefBuckets if no buckets for a native histogram (see below) are used, // DefBuckets if no buckets for a native histogram (see below) are used,
// otherwise the default is no buckets. (In other words, if you want to // otherwise the default is no buckets. (In other words, if you want to
// use both reguler buckets and buckets for a native histogram, you have // use both regular buckets and buckets for a native histogram, you have
// to define the regular buckets here explicitly.) // to define the regular buckets here explicitly.)
Buckets []float64 Buckets []float64
@ -402,7 +402,7 @@ type HistogramOpts struct {
// Histogram by a Prometheus server with that feature enabled (requires // Histogram by a Prometheus server with that feature enabled (requires
// Prometheus v2.40+). Sparse buckets are exponential buckets covering // Prometheus v2.40+). Sparse buckets are exponential buckets covering
// the whole float64 range (with the exception of the “zero” bucket, see // the whole float64 range (with the exception of the “zero” bucket, see
// SparseBucketsZeroThreshold below). From any one bucket to the next, // NativeHistogramZeroThreshold below). From any one bucket to the next,
// the width of the bucket grows by a constant // the width of the bucket grows by a constant
// factor. NativeHistogramBucketFactor provides an upper bound for this // factor. NativeHistogramBucketFactor provides an upper bound for this
// factor (exception see below). The smaller // factor (exception see below). The smaller
@ -414,8 +414,8 @@ type HistogramOpts struct {
// and 2, same as between 2 and 4, and 4 and 8, etc.). // and 2, same as between 2 and 4, and 4 and 8, etc.).
// //
// Details about the actually used factor: The factor is calculated as // Details about the actually used factor: The factor is calculated as
// 2^(2^n), where n is an integer number between (and including) -8 and // 2^(2^-n), where n is an integer number between (and including) -4 and
// 4. n is chosen so that the resulting factor is the largest that is // 8. n is chosen so that the resulting factor is the largest that is
// still smaller or equal to NativeHistogramBucketFactor. Note that the // still smaller or equal to NativeHistogramBucketFactor. Note that the
// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
// ). If NativeHistogramBucketFactor is greater than 1 but smaller than // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
@ -429,12 +429,12 @@ type HistogramOpts struct {
// a major version bump. // a major version bump.
NativeHistogramBucketFactor float64 NativeHistogramBucketFactor float64
// All observations with an absolute value of less or equal // All observations with an absolute value of less or equal
// NativeHistogramZeroThreshold are accumulated into a “zero” // NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
// bucket. For best results, this should be close to a bucket // For best results, this should be close to a bucket boundary. This is
// boundary. This is usually the case if picking a power of two. If // usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero, // NativeHistogramZeroThreshold is left at zero,
// DefSparseBucketsZeroThreshold is used as the threshold. To configure // DefNativeHistogramZeroThreshold is used as the threshold. To
// a zero bucket with an actual threshold of zero (i.e. only // configure a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set // observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
// constant (or any negative float value). // constant (or any negative float value).
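
The corrected formula above can be checked numerically; a small standalone sketch (not vendored code) printing the factor for each supported schema n:

package main

import (
	"fmt"
	"math"
)

func main() {
	// factor = 2^(2^-n) for the integer schemas n = -4 ... 8.
	for n := -4; n <= 8; n++ {
		factor := math.Pow(2, math.Pow(2, float64(-n)))
		fmt.Printf("n=%2d factor=%v\n", n, factor)
	}
	// n=8 gives the smallest factor, approx. 1.00271; n=-4 gives 65536.
}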
@ -447,26 +447,46 @@ type HistogramOpts struct {
// Histogram are sufficiently wide-spread. In particular, this could be // Histogram are sufficiently wide-spread. In particular, this could be
// used as a DoS attack vector. Where the observed values depend on // used as a DoS attack vector. Where the observed values depend on
// external inputs, it is highly recommended to set a // external inputs, it is highly recommended to set a
// NativeHistogramMaxBucketNumber.) Once the set // NativeHistogramMaxBucketNumber.) Once the set
// NativeHistogramMaxBucketNumber is exceeded, the following strategy is // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
// enacted: First, if the last reset (or the creation) of the histogram // enacted:
// is at least NativeHistogramMinResetDuration ago, then the whole // - First, if the last reset (or the creation) of the histogram is at
// histogram is reset to its initial state (including regular // least NativeHistogramMinResetDuration ago, then the whole
// buckets). If less time has passed, or if // histogram is reset to its initial state (including regular
// NativeHistogramMinResetDuration is zero, no reset is // buckets).
// performed. Instead, the zero threshold is increased sufficiently to // - If less time has passed, or if NativeHistogramMinResetDuration is
// reduce the number of buckets to or below // zero, no reset is performed. Instead, the zero threshold is
// NativeHistogramMaxBucketNumber, but not to more than // increased sufficiently to reduce the number of buckets to or below
// NativeHistogramMaxZeroThreshold. Thus, if // NativeHistogramMaxBucketNumber, but not to more than
// NativeHistogramMaxZeroThreshold is already at or below the current // NativeHistogramMaxZeroThreshold. Thus, if
// zero threshold, nothing happens at this step. After that, if the // NativeHistogramMaxZeroThreshold is already at or below the current
// number of buckets still exceeds NativeHistogramMaxBucketNumber, the // zero threshold, nothing happens at this step.
// resolution of the histogram is reduced by doubling the width of the // - After that, if the number of buckets still exceeds
// sparse buckets (up to a growth factor between one bucket to the next // NativeHistogramMaxBucketNumber, the resolution of the histogram is
// of 2^(2^4) = 65536, see above). // reduced by doubling the width of the sparse buckets (up to a
// growth factor between one bucket to the next of 2^(2^4) = 65536,
// see above).
// - Any increased zero threshold or reduced resolution is reset back
// to their original values once NativeHistogramMinResetDuration has
// passed (since the last reset or the creation of the histogram).
NativeHistogramMaxBucketNumber uint32 NativeHistogramMaxBucketNumber uint32
NativeHistogramMinResetDuration time.Duration NativeHistogramMinResetDuration time.Duration
NativeHistogramMaxZeroThreshold float64 NativeHistogramMaxZeroThreshold float64
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// HistogramVecOpts bundles the options to create a HistogramVec metric.
// It is mandatory to set HistogramOpts; see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type HistogramVecOpts struct {
HistogramOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
} }
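
To make the bucket-limiting strategy above concrete, a hedged configuration sketch (all values illustrative, not recommendations):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "example_request_duration_seconds",
		Help:                            "Example native histogram with bucket limiting.",
		NativeHistogramBucketFactor:     1.1,           // upper bound for bucket growth
		NativeHistogramMaxBucketNumber:  100,           // cap against unbounded inputs
		NativeHistogramMinResetDuration: 1 * time.Hour, // allow a full reset at most hourly
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.42)
}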
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@ -488,11 +508,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
} }
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) { if len(desc.variableLabels.names) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
} }
for _, n := range desc.variableLabels { for _, n := range desc.variableLabels.names {
if n == bucketLabel { if n == bucketLabel {
panic(errBucketLabelNotAllowed) panic(errBucketLabelNotAllowed)
} }
@ -503,6 +523,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
} }
} }
if opts.now == nil {
opts.now = time.Now
}
h := &histogram{ h := &histogram{
desc: desc, desc: desc,
upperBounds: opts.Buckets, upperBounds: opts.Buckets,
@ -510,8 +534,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
lastResetTime: time.Now(), lastResetTime: opts.now(),
now: time.Now, now: opts.now,
} }
if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
h.upperBounds = DefBuckets h.upperBounds = DefBuckets
@ -544,16 +568,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
} }
// Finally we know the final length of h.upperBounds and can make buckets // Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars: // for both counts as well as exemplars:
h.counts[0] = &histogramCounts{ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
buckets: make([]uint64, len(h.upperBounds)), atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
nativeHistogramSchema: h.nativeHistogramSchema, h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
} atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
h.counts[1] = &histogramCounts{ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
buckets: make([]uint64, len(h.upperBounds)),
nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
nativeHistogramSchema: h.nativeHistogramSchema,
}
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection. h.init(h) // Init self-collection.
@ -632,8 +652,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
if frac == 0.5 { if frac == 0.5 {
key-- key--
} }
div := 1 << -schema offset := (1 << -schema) - 1
key = (key + div - 1) / div key = (key + offset) >> -schema
} }
if isInf { if isInf {
key++ key++
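
The rewritten key reduction above replaces a rounding-up division with a shift; a throwaway sketch (not vendored code) showing the two agree for non-negative keys and a power-of-two divisor:

package main

import "fmt"

func main() {
	schema := -2 // divisor is 1 << 2 = 4
	for key := 0; key <= 9; key++ {
		div := 1 << -schema
		oldKey := (key + div - 1) / div     // ceil(key/div) via division
		offset := (1 << -schema) - 1
		newKey := (key + offset) >> -schema // ceil(key/div) via shift
		fmt.Println(key, oldKey, newKey)    // oldKey == newKey
	}
}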
@ -694,9 +714,11 @@ type histogram struct {
nativeHistogramMaxZeroThreshold float64 nativeHistogramMaxZeroThreshold float64
nativeHistogramMaxBuckets uint32 nativeHistogramMaxBuckets uint32
nativeHistogramMinResetDuration time.Duration nativeHistogramMinResetDuration time.Duration
lastResetTime time.Time // Protected by mtx. // lastResetTime is protected by mtx. It is also used as created timestamp.
lastResetTime time.Time
now func() time.Time // To mock out time.Now() for testing. // now is for testing purposes, by default it's time.Now.
now func() time.Time
} }
func (h *histogram) Desc() *Desc { func (h *histogram) Desc() *Desc {
@ -735,9 +757,10 @@ func (h *histogram) Write(out *dto.Metric) error {
waitForCooldown(count, coldCounts) waitForCooldown(count, coldCounts)
his := &dto.Histogram{ his := &dto.Histogram{
Bucket: make([]*dto.Bucket, len(h.upperBounds)), Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count), SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
CreatedTimestamp: timestamppb.New(h.lastResetTime),
} }
out.Histogram = his out.Histogram = his
out.Label = h.labelPairs out.Label = h.labelPairs
@ -775,6 +798,16 @@ func (h *histogram) Write(out *dto.Metric) error {
his.ZeroCount = proto.Uint64(zeroBucket) his.ZeroCount = proto.Uint64(zeroBucket)
his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
// Add a no-op span to a histogram without observations and with
// a zero threshold of zero. Otherwise, a native histogram would
// look like a classic histogram to scrapers.
if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
his.PositiveSpan = []*dto.BucketSpan{{
Offset: proto.Int32(0),
Length: proto.Uint32(0),
}}
}
} }
addAndResetCounts(hotCounts, coldCounts) addAndResetCounts(hotCounts, coldCounts)
return nil return nil
@ -810,7 +843,7 @@ func (h *histogram) observe(v float64, bucket int) {
} }
} }
// limitSparsebuckets applies a strategy to limit the number of populated sparse // limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the // buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce // number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet // the number sufficiently, or if the provided counts aren't fully updated yet
@ -847,20 +880,23 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
h.doubleBucketWidth(hotCounts, coldCounts) h.doubleBucketWidth(hotCounts, coldCounts)
} }
// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration // maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
// has been passed. It returns true if the histogram has been reset. The caller // has been passed. It returns true if the histogram has been reset. The caller
// must have locked h.mtx. // must have locked h.mtx.
func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { func (h *histogram) maybeReset(
hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
) bool {
// We are using the possibly mocked h.now() rather than // We are using the possibly mocked h.now() rather than
// time.Since(h.lastResetTime) to enable testing. // time.Since(h.lastResetTime) to enable testing.
if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { if h.nativeHistogramMinResetDuration == 0 ||
h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
return false return false
} }
// Completely reset coldCounts. // Completely reset coldCounts.
h.resetCounts(cold) h.resetCounts(cold)
// Repeat the latest observation to not lose it completely. // Repeat the latest observation to not lose it completely.
cold.observe(value, bucket, true) cold.observe(value, bucket, true)
// Make coldCounts the new hot counts while ressetting countAndHotIdx. // Make coldCounts the new hot counts while resetting countAndHotIdx.
n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
count := n & ((1 << 63) - 1) count := n & ((1 << 63) - 1)
waitForCooldown(count, hot) waitForCooldown(count, hot)
@ -1034,15 +1070,23 @@ type HistogramVec struct {
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names. // partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc( return V2.NewHistogramVec(HistogramVecOpts{
HistogramOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help, opts.Help,
labelNames, opts.VariableLabels,
opts.ConstLabels, opts.ConstLabels,
) )
return &HistogramVec{ return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...) return newHistogram(desc, opts.HistogramOpts, lvs...)
}), }),
} }
} }
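
A compact sketch of the v2 histogram vector constructor (invented names and buckets):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	payload := prometheus.V2.NewHistogramVec(prometheus.HistogramVecOpts{
		HistogramOpts: prometheus.HistogramOpts{
			Name:    "example_payload_bytes",
			Help:    "Example histogram partitioned by direction.",
			Buckets: prometheus.ExponentialBuckets(64, 4, 6), // 64 B ... 64 KiB
		},
		VariableLabels: prometheus.UnconstrainedLabels{"direction"},
	})
	prometheus.MustRegister(payload)
	payload.WithLabelValues("ingress").Observe(1500)
}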
@ -1161,6 +1205,7 @@ type constHistogram struct {
sum float64 sum float64
buckets map[float64]uint64 buckets map[float64]uint64
labelPairs []*dto.LabelPair labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
} }
func (h *constHistogram) Desc() *Desc { func (h *constHistogram) Desc() *Desc {
@ -1168,7 +1213,9 @@ func (h *constHistogram) Desc() *Desc {
} }
func (h *constHistogram) Write(out *dto.Metric) error { func (h *constHistogram) Write(out *dto.Metric) error {
his := &dto.Histogram{} his := &dto.Histogram{
CreatedTimestamp: h.createdTs,
}
buckets := make([]*dto.Bucket, 0, len(h.buckets)) buckets := make([]*dto.Bucket, 0, len(h.buckets))
@ -1215,7 +1262,7 @@ func NewConstHistogram(
if desc.err != nil { if desc.err != nil {
return nil, desc.err return nil, desc.err
} }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err return nil, err
} }
return &constHistogram{ return &constHistogram{
@ -1309,7 +1356,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
// Multiple spans with only small gaps in between are probably // Multiple spans with only small gaps in between are probably
// encoded more efficiently as one larger span with a few empty // encoded more efficiently as one larger span with a few empty
// buckets. Needs some research to find the sweet spot. For now, // buckets. Needs some research to find the sweet spot. For now,
// we assume that gaps of one ore two buckets should not create // we assume that gaps of one or two buckets should not create
// a new span. // a new span.
iDelta := int32(i - nextI) iDelta := int32(i - nextI)
if n == 0 || iDelta > 2 { if n == 0 || iDelta > 2 {

View File

@ -14,7 +14,7 @@
// It provides tools to compare sequences of strings and generate textual diffs. // It provides tools to compare sequences of strings and generate textual diffs.
// //
// Maintaining `GetUnifiedDiffString` here because original repository // Maintaining `GetUnifiedDiffString` here because original repository
// (https://github.com/pmezard/go-difflib) is no loger maintained. // (https://github.com/pmezard/go-difflib) is no longer maintained.
package internal package internal
import ( import (

View File

@ -32,6 +32,104 @@ import (
// create a Desc. // create a Desc.
type Labels map[string]string type Labels map[string]string
// LabelConstraint normalizes label values.
type LabelConstraint func(string) string
// ConstrainedLabel represents a label name and its constraint function
// to normalize label values. This type is commonly used when constructing
// metric vector Collectors.
type ConstrainedLabel struct {
Name string
Constraint LabelConstraint
}
// ConstrainableLabels is an interface that allows creating labels that can
// be optionally constrained.
//
// prometheus.V2.NewCounterVec(CounterVecOpts{
// CounterOpts: {...}, // Usual CounterOpts fields
// VariableLabels: ConstrainedLabels{
// {Name: "A"},
// {Name: "B", Constraint: func(v string) string { ... }},
// },
// })
type ConstrainableLabels interface {
compile() *compiledLabels
labelNames() []string
}
// ConstrainedLabels represents a collection of label name -> constraint function
// to normalize label values. This type is commonly used when constructing
// metric vector Collectors.
type ConstrainedLabels []ConstrainedLabel
func (cls ConstrainedLabels) compile() *compiledLabels {
compiled := &compiledLabels{
names: make([]string, len(cls)),
labelConstraints: map[string]LabelConstraint{},
}
for i, label := range cls {
compiled.names[i] = label.Name
if label.Constraint != nil {
compiled.labelConstraints[label.Name] = label.Constraint
}
}
return compiled
}
func (cls ConstrainedLabels) labelNames() []string {
names := make([]string, len(cls))
for i, label := range cls {
names[i] = label.Name
}
return names
}
// UnconstrainedLabels represents a collection of labels without any constraints on
// their value. Thus, it is simply a collection of label names.
//
// UnconstrainedLabels([]string{ "A", "B" })
//
// is equivalent to
//
// ConstrainedLabels {
// { Name: "A" },
// { Name: "B" },
// }
type UnconstrainedLabels []string
func (uls UnconstrainedLabels) compile() *compiledLabels {
return &compiledLabels{
names: uls,
}
}
func (uls UnconstrainedLabels) labelNames() []string {
return uls
}
type compiledLabels struct {
names []string
labelConstraints map[string]LabelConstraint
}
func (cls *compiledLabels) compile() *compiledLabels {
return cls
}
func (cls *compiledLabels) labelNames() []string {
return cls.names
}
func (cls *compiledLabels) constrain(labelName, value string) string {
if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
return fn(value)
}
return value
}
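
A tiny sketch of the equivalence documented above: both forms carry the same label names, but only the constrained form can attach a normalization function:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	u := prometheus.UnconstrainedLabels{"A", "B"}
	c := prometheus.ConstrainedLabels{{Name: "A"}, {Name: "B"}}
	// Both satisfy ConstrainableLabels and compile to the names [A B].
	var _, _ prometheus.ConstrainableLabels = u, c
	fmt.Println(u)
}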
// reservedLabelPrefix is a prefix which is not legal in user-supplied // reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names. // label names.
const reservedLabelPrefix = "__" const reservedLabelPrefix = "__"

View File

@ -20,11 +20,9 @@ import (
"strings" "strings"
"time" "time"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
) )
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
@ -94,6 +92,9 @@ type Opts struct {
// machine_role metric). See also // machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels ConstLabels Labels
// now is for testing purposes, by default it's time.Now.
now func() time.Time
} }
// BuildFQName joins the given three name components by "_". Empty name // BuildFQName joins the given three name components by "_". Empty name

View File

@ -37,6 +37,7 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -47,9 +48,10 @@ import (
) )
const ( const (
contentTypeHeader = "Content-Type" contentTypeHeader = "Content-Type"
contentEncodingHeader = "Content-Encoding" contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding" acceptEncodingHeader = "Accept-Encoding"
processStartTimeHeader = "Process-Start-Time-Unix"
) )
var gzipPool = sync.Pool{ var gzipPool = sync.Pool{
@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
} }
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if !opts.ProcessStartTime.IsZero() {
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
}
if inFlightSem != nil { if inFlightSem != nil {
select { select {
case inFlightSem <- struct{}{}: // All good, carry on. case inFlightSem <- struct{}{}: // All good, carry on.
@ -366,6 +371,14 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus // (which changes the identity of the resulting series on the Prometheus
// server). // server).
EnableOpenMetrics bool EnableOpenMetrics bool
// ProcessStartTime allows setting the process start time value that will be exposed
// with the "Process-Start-Time-Unix" response header along with the metrics
// payload. This allows callers to perform efficient transformations to cumulative
// counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
// scrape target.
// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
// exposition format.
ProcessStartTime time.Time
} }
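
A minimal sketch of wiring the new option (assuming the standard promhttp handler setup; address and error handling are illustrative):

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	start := time.Now() // captured once at process startup

	http.Handle("/metrics", promhttp.HandlerFor(
		prometheus.DefaultGatherer,
		promhttp.HandlerOpts{ProcessStartTime: start},
	))
	// Scrape responses now carry "Process-Start-Time-Unix: <unix seconds>".
	http.ListenAndServe(":8080", nil) //nolint:errcheck // illustration only
}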
// gzipAccepted returns whether the client will accept gzip-encoded content. // gzipAccepted returns whether the client will accept gzip-encoded content.

View File

@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
o.apply(rtOpts) o.apply(rtOpts)
} }
code, method := checkLabels(counter) // Curry the counter with dynamic labels before checking the remaining labels.
code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) { return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r) resp, err := next.RoundTrip(r)
if err == nil { if err == nil {
addWithExemplar( l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), for label, resolve := range rtOpts.extraLabelsFromCtx {
1, l[label] = resolve(resp.Request.Context())
rtOpts.getExemplarFn(r.Context()), }
) addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
} }
return resp, err return resp, err
} }
@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
o.apply(rtOpts) o.apply(rtOpts)
} }
code, method := checkLabels(obs) // Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) { return func(r *http.Request) (*http.Response, error) {
start := time.Now() start := time.Now()
resp, err := next.RoundTrip(r) resp, err := next.RoundTrip(r)
if err == nil { if err == nil {
observeWithExemplar( l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), for label, resolve := range rtOpts.extraLabelsFromCtx {
time.Since(start).Seconds(), l[label] = resolve(resp.Request.Context())
rtOpts.getExemplarFn(r.Context()), }
) observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
} }
return resp, err return resp, err
} }
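
For orientation, a sketch of the client-side instrumentation these hunks modify (invented metric name; error handling elided):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_client_requests_total",
		Help: "Outgoing requests by code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(counter)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport),
	}
	client.Get("http://example.com") //nolint:errcheck // illustration only
}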

View File

@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
o.apply(hOpts) o.apply(hOpts)
} }
code, method := checkLabels(obs) // Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code { if code {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
d := newDelegator(w, nil) d := newDelegator(w, nil)
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
observeWithExemplar( l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), for label, resolve := range hOpts.extraLabelsFromCtx {
time.Since(now).Seconds(), l[label] = resolve(r.Context())
hOpts.getExemplarFn(r.Context()), }
) observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
} }
} }
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
now := time.Now() now := time.Now()
next.ServeHTTP(w, r) next.ServeHTTP(w, r)
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
observeWithExemplar( for label, resolve := range hOpts.extraLabelsFromCtx {
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), l[label] = resolve(r.Context())
time.Since(now).Seconds(), }
hOpts.getExemplarFn(r.Context()), observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
)
} }
} }
@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
o.apply(hOpts) o.apply(hOpts)
} }
code, method := checkLabels(counter) // Curry the counter with dynamic labels before checking the remaining labels.
code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
if code { if code {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil) d := newDelegator(w, nil)
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
addWithExemplar( l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), for label, resolve := range hOpts.extraLabelsFromCtx {
1, l[label] = resolve(r.Context())
hOpts.getExemplarFn(r.Context()), }
) addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
} }
} }
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r) next.ServeHTTP(w, r)
addWithExemplar(
counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
1, for label, resolve := range hOpts.extraLabelsFromCtx {
hOpts.getExemplarFn(r.Context()), l[label] = resolve(r.Context())
) }
addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
} }
} }
@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
o.apply(hOpts) o.apply(hOpts)
} }
code, method := checkLabels(obs) // Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
now := time.Now() now := time.Now()
d := newDelegator(w, func(status int) { d := newDelegator(w, func(status int) {
observeWithExemplar( l := labels(code, method, r.Method, status, hOpts.extraMethods...)
obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), for label, resolve := range hOpts.extraLabelsFromCtx {
time.Since(now).Seconds(), l[label] = resolve(r.Context())
hOpts.getExemplarFn(r.Context()), }
) observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}) })
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
} }
@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
o.apply(hOpts) o.apply(hOpts)
} }
code, method := checkLabels(obs) // Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code { if code {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil) d := newDelegator(w, nil)
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r) size := computeApproximateRequestSize(r)
observeWithExemplar(
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
float64(size), for label, resolve := range hOpts.extraLabelsFromCtx {
hOpts.getExemplarFn(r.Context()), l[label] = resolve(r.Context())
) }
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
} }
} }
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r) next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r) size := computeApproximateRequestSize(r)
observeWithExemplar(
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
float64(size), for label, resolve := range hOpts.extraLabelsFromCtx {
hOpts.getExemplarFn(r.Context()), l[label] = resolve(r.Context())
) }
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
} }
} }
@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
o.apply(hOpts) o.apply(hOpts)
} }
code, method := checkLabels(obs) // Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil) d := newDelegator(w, nil)
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
observeWithExemplar(
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
float64(d.Written()), for label, resolve := range hOpts.extraLabelsFromCtx {
hOpts.getExemplarFn(r.Context()), l[label] = resolve(r.Context())
) }
observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
}) })
} }
@ -380,16 +389,13 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
return true return true
} }
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
labels := prometheus.Labels{} labels := prometheus.Labels{}
if !(code || method) {
return labels
}
if code { if code {
labels["code"] = sanitizeCode(status) labels["code"] = sanitizeCode(status)
} }

View File

@ -24,14 +24,32 @@ type Option interface {
apply(*options) apply(*options)
} }
// LabelValueFromCtx is used to compute the label value from the request context.
// The context can be filled with values from the request through middleware.
type LabelValueFromCtx func(ctx context.Context) string
// options store options for both a handler or round tripper. // options store options for both a handler or round tripper.
type options struct { type options struct {
extraMethods []string extraMethods []string
getExemplarFn func(requestCtx context.Context) prometheus.Labels getExemplarFn func(requestCtx context.Context) prometheus.Labels
extraLabelsFromCtx map[string]LabelValueFromCtx
} }
func defaultOptions() *options { func defaultOptions() *options {
return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} return &options{
getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
extraLabelsFromCtx: map[string]LabelValueFromCtx{},
}
}
func (o *options) emptyDynamicLabels() prometheus.Labels {
labels := prometheus.Labels{}
for label := range o.extraLabelsFromCtx {
labels[label] = ""
}
return labels
} }
type optionApplyFunc func(*options) type optionApplyFunc func(*options)
@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option {
}) })
} }
// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. // WithExemplarFromContext allows injecting a function that extracts an exemplar from the context and attaches it to counter and histogram metrics.
// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric // If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but
// will get instrumented without exemplar. // the metric will continue to observe/increment.
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
return optionApplyFunc(func(o *options) { return optionApplyFunc(func(o *options) {
o.getExemplarFn = getExemplarFn o.getExemplarFn = getExemplarFn
}) })
} }
// WithLabelFromCtx registers a label for dynamic resolution with access to the request context.
// See ExampleInstrumentHandlerWithLabelResolver for example usage.
func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
return optionApplyFunc(func(o *options) {
o.extraLabelsFromCtx[name] = valueFn
})
}
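
A hedged end-to-end sketch of the new option (the context key and "tenant" label are invented for the example):

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantKey struct{}

func main() {
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Requests partitioned by code, method, and a context-resolved tenant.",
	}, []string{"code", "method", "tenant"})
	prometheus.MustRegister(counter)

	handler := promhttp.InstrumentHandlerCounter(counter,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if v, ok := ctx.Value(tenantKey{}).(string); ok {
				return v
			}
			return "unknown"
		}),
	)
	http.Handle("/", handler)
}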

View File

@ -21,18 +21,17 @@ import (
"path/filepath" "path/filepath"
"runtime" "runtime"
"sort" "sort"
"strconv"
"strings" "strings"
"sync" "sync"
"unicode/utf8" "unicode/utf8"
"github.com/cespare/xxhash/v2"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal" "github.com/prometheus/client_golang/prometheus/internal"
"github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"google.golang.org/protobuf/proto"
) )
const ( const (
@ -549,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
goroutineBudget-- goroutineBudget--
runtime.Gosched() runtime.Gosched()
} }
// Once both checkedMetricChan and uncheckdMetricChan are closed // Once both checkedMetricChan and uncheckedMetricChan are closed
// and drained, the contraption above will nil out cmc and umc, // and drained, the contraption above will nil out cmc and umc,
// and then we can leave the collect loop here. // and then we can leave the collect loop here.
if cmc == nil && umc == nil { if cmc == nil && umc == nil {
@ -933,6 +932,10 @@ func checkMetricConsistency(
h.WriteString(lp.GetValue()) h.WriteString(lp.GetValue())
h.Write(separatorByteSlice) h.Write(separatorByteSlice)
} }
if dtoMetric.TimestampMs != nil {
h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
h.Write(separatorByteSlice)
}
hSum := h.Sum64() hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists { if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf( return fmt.Errorf(
@ -960,7 +963,7 @@ func checkDescConsistency(
// Is the desc consistent with the content of the metric? // Is the desc consistent with the content of the metric?
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
copy(lpsFromDesc, desc.constLabelPairs) copy(lpsFromDesc, desc.constLabelPairs)
for _, l := range desc.variableLabels { for _, l := range desc.variableLabels.names {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l), Name: proto.String(l),
}) })

View File

@ -22,11 +22,11 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/beorn7/perks/quantile"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/beorn7/perks/quantile"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
) )
// quantileLabel is used for the label that defines the quantile in a // quantileLabel is used for the label that defines the quantile in a
@ -146,6 +146,21 @@ type SummaryOpts struct {
// is the internal buffer size of the underlying package // is the internal buffer size of the underlying package
// "github.com/bmizerany/perks/quantile"). // "github.com/bmizerany/perks/quantile").
BufCap uint32 BufCap uint32
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// SummaryVecOpts bundles the options to create a SummaryVec metric.
// It is mandatory to set SummaryOpts; see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type SummaryVecOpts struct {
SummaryOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
} }
// Problem with the sliding-window decay algorithm... The Merge method of // Problem with the sliding-window decay algorithm... The Merge method of
@ -177,11 +192,11 @@ func NewSummary(opts SummaryOpts) Summary {
} }
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) { if len(desc.variableLabels.names) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
} }
for _, n := range desc.variableLabels { for _, n := range desc.variableLabels.names {
if n == quantileLabel { if n == quantileLabel {
panic(errQuantileLabelNotAllowed) panic(errQuantileLabelNotAllowed)
} }
@ -211,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
opts.BufCap = DefBufCap opts.BufCap = DefBufCap
} }
if opts.now == nil {
opts.now = time.Now
}
if len(opts.Objectives) == 0 { if len(opts.Objectives) == 0 {
// Use the lock-free implementation of a Summary without objectives. // Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{ s := &noObjectivesSummary{
@ -219,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
counts: [2]*summaryCounts{{}, {}}, counts: [2]*summaryCounts{{}, {}},
} }
s.init(s) // Init self-collection. s.init(s) // Init self-collection.
s.createdTs = timestamppb.New(opts.now())
return s return s
} }
@ -234,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
coldBuf: make([]float64, 0, opts.BufCap), coldBuf: make([]float64, 0, opts.BufCap),
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
} }
s.headStreamExpTime = time.Now().Add(s.streamDuration) s.headStreamExpTime = opts.now().Add(s.streamDuration)
s.hotBufExpTime = s.headStreamExpTime s.hotBufExpTime = s.headStreamExpTime
for i := uint32(0); i < opts.AgeBuckets; i++ { for i := uint32(0); i < opts.AgeBuckets; i++ {
@ -248,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
sort.Float64s(s.sortedObjectives) sort.Float64s(s.sortedObjectives)
s.init(s) // Init self-collection. s.init(s) // Init self-collection.
s.createdTs = timestamppb.New(opts.now())
return s return s
} }
@ -275,6 +295,8 @@ type summary struct {
headStream *quantile.Stream headStream *quantile.Stream
headStreamIdx int headStreamIdx int
headStreamExpTime, hotBufExpTime time.Time headStreamExpTime, hotBufExpTime time.Time
createdTs *timestamppb.Timestamp
} }
func (s *summary) Desc() *Desc { func (s *summary) Desc() *Desc {
@ -296,7 +318,9 @@ func (s *summary) Observe(v float64) {
} }
func (s *summary) Write(out *dto.Metric) error { func (s *summary) Write(out *dto.Metric) error {
sum := &dto.Summary{} sum := &dto.Summary{
CreatedTimestamp: s.createdTs,
}
qs := make([]*dto.Quantile, 0, len(s.objectives)) qs := make([]*dto.Quantile, 0, len(s.objectives))
s.bufMtx.Lock() s.bufMtx.Lock()
@ -429,6 +453,8 @@ type noObjectivesSummary struct {
counts [2]*summaryCounts counts [2]*summaryCounts
labelPairs []*dto.LabelPair labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
} }
func (s *noObjectivesSummary) Desc() *Desc { func (s *noObjectivesSummary) Desc() *Desc {
@ -479,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
} }
sum := &dto.Summary{ sum := &dto.Summary{
SampleCount: proto.Uint64(count), SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
CreatedTimestamp: s.createdTs,
} }
out.Summary = sum out.Summary = sum
@ -530,20 +557,28 @@ type SummaryVec struct {
// it is handled by the Prometheus server internally, “quantile” is an illegal // it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used. // label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
for _, ln := range labelNames { return V2.NewSummaryVec(SummaryVecOpts{
SummaryOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel { if ln == quantileLabel {
panic(errQuantileLabelNotAllowed) panic(errQuantileLabelNotAllowed)
} }
} }
desc := NewDesc( desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help, opts.Help,
labelNames, opts.VariableLabels,
opts.ConstLabels, opts.ConstLabels,
) )
return &SummaryVec{ return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...) return newSummary(desc, opts.SummaryOpts, lvs...)
}), }),
} }
} }
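For orientation, a minimal sketch (hypothetical metric name and label) of how the two constructors above relate; the v1 helper is now a thin wrapper over the V2 entry point:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// v1-style construction, which now delegates to V2 internally.
	_ = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latency by method.",
	}, []string{"method"})

	// The equivalent V2 construction, with labels as UnconstrainedLabels.
	_ = prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
		SummaryOpts: prometheus.SummaryOpts{
			Name: "request_duration_seconds",
			Help: "Request latency by method.",
		},
		VariableLabels: prometheus.UnconstrainedLabels([]string{"method"}),
	})
}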
@ -662,6 +697,7 @@ type constSummary struct {
sum float64 sum float64
quantiles map[float64]float64 quantiles map[float64]float64
labelPairs []*dto.LabelPair labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
} }
func (s *constSummary) Desc() *Desc { func (s *constSummary) Desc() *Desc {
@ -669,7 +705,9 @@ func (s *constSummary) Desc() *Desc {
} }
func (s *constSummary) Write(out *dto.Metric) error { func (s *constSummary) Write(out *dto.Metric) error {
sum := &dto.Summary{} sum := &dto.Summary{
CreatedTimestamp: s.createdTs,
}
qs := make([]*dto.Quantile, 0, len(s.quantiles)) qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count) sum.SampleCount = proto.Uint64(s.count)
@ -718,7 +756,7 @@ func NewConstSummary(
if desc.err != nil { if desc.err != nil {
return nil, desc.err return nil, desc.err
} }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err return nil, err
} }
return &constSummary{ return &constSummary{

View File

@ -23,7 +23,9 @@ type Timer struct {
} }
// NewTimer creates a new Timer. The provided Observer is used to observe a // NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the // duration in seconds. If the Observer implements ExemplarObserver, passing an
// exemplar later on is also supported.
// Timer is usually used to time a function call in the
// following way: // following way:
// //
// func TimeMe() { // func TimeMe() {
@ -31,6 +33,14 @@ type Timer struct {
// defer timer.ObserveDuration() // defer timer.ObserveDuration()
// // Do actual work. // // Do actual work.
// } // }
//
// or
//
// func TimeMeWithExemplar() {
// timer := NewTimer(myHistogram)
// defer timer.ObserveDurationWithExemplar(exemplar)
// // Do actual work.
// }
func NewTimer(o Observer) *Timer { func NewTimer(o Observer) *Timer {
return &Timer{ return &Timer{
begin: time.Now(), begin: time.Now(),
@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
} }
return d return d
} }
// ObserveDurationWithExemplar is like ObserveDuration, but it will also
// observe the exemplar with the duration, unless the exemplar is nil or the
// provided Observer cannot be cast to an ExemplarObserver.
func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
d := time.Since(t.begin)
eo, ok := t.observer.(ExemplarObserver)
if ok && exemplar != nil {
eo.ObserveWithExemplar(d.Seconds(), exemplar)
return d
}
if t.observer != nil {
t.observer.Observe(d.Seconds())
}
return d
}
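A short usage sketch: histograms implement ExemplarObserver, so the exemplar is attached to the observation; the metric name and trace-ID label are assumptions for illustration:

package main

import "github.com/prometheus/client_golang/prometheus"

var handlerDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "handler_duration_seconds",
	Help: "Time spent in the handler.",
})

func handle(traceID string) {
	timer := prometheus.NewTimer(handlerDuration)
	// With a nil exemplar, or an Observer that is not an ExemplarObserver,
	// this falls back to a plain Observe.
	defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})
	// ... actual work ...
}

func main() { handle("abc123") }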

View File

@ -14,18 +14,17 @@
package prometheus package prometheus
import ( import (
"errors"
"fmt" "fmt"
"sort" "sort"
"time" "time"
"unicode/utf8" "unicode/utf8"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/prometheus/client_golang/prometheus/internal" "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
) )
// ValueType is an enumeration of metric types that represent a simple value. // ValueType is an enumeration of metric types that represent a simple value.
@ -93,7 +92,7 @@ func (v *valueFunc) Desc() *Desc {
} }
func (v *valueFunc) Write(out *dto.Metric) error { func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
} }
// NewConstMetric returns a metric with one fixed value that cannot be // NewConstMetric returns a metric with one fixed value that cannot be
@ -107,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if desc.err != nil { if desc.err != nil {
return nil, desc.err return nil, desc.err
} }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err return nil, err
} }
metric := &dto.Metric{} metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
return nil, err return nil, err
} }
@ -132,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal
return m return m
} }
// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
// with created timestamp set and returns an error for other metric types.
func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
switch valueType {
case CounterValue:
break
default:
return nil, errors.New("created timestamps are only supported for counters")
}
metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
return nil, err
}
return &constMetric{
desc: desc,
metric: metric,
}, nil
}
// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
// NewConstMetricWithCreatedTimestamp would have returned an error.
func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
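A hedged sketch of using the new helpers from calling code; the metric name and the external start time are assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("jobs_processed_total", "Jobs processed.", nil, nil)
	created := time.Now().Add(-time.Hour) // when the external counter last reset

	// Counters accept a created timestamp...
	m := prometheus.MustNewConstMetricWithCreatedTimestamp(
		desc, prometheus.CounterValue, 42, created,
	)
	fmt.Println(m.Desc())

	// ...while any other value type is rejected with an error.
	_, err := prometheus.NewConstMetricWithCreatedTimestamp(
		desc, prometheus.GaugeValue, 1, created,
	)
	fmt.Println(err)
}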
type constMetric struct { type constMetric struct {
desc *Desc desc *Desc
metric *dto.Metric metric *dto.Metric
@ -155,11 +191,12 @@ func populateMetric(
labelPairs []*dto.LabelPair, labelPairs []*dto.LabelPair,
e *dto.Exemplar, e *dto.Exemplar,
m *dto.Metric, m *dto.Metric,
ct *timestamppb.Timestamp,
) error { ) error {
m.Label = labelPairs m.Label = labelPairs
switch t { switch t {
case CounterValue: case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
case GaugeValue: case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)} m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue: case UntypedValue:
@ -178,19 +215,19 @@ func populateMetric(
// This function is only needed for custom Metric implementations. See MetricVec // This function is only needed for custom Metric implementations. See MetricVec
// example. // example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
if totalLen == 0 { if totalLen == 0 {
// Super fast path. // Super fast path.
return nil return nil
} }
if len(desc.variableLabels) == 0 { if len(desc.variableLabels.names) == 0 {
// Moderately fast path. // Moderately fast path.
return desc.constLabelPairs return desc.constLabelPairs
} }
labelPairs := make([]*dto.LabelPair, 0, totalLen) labelPairs := make([]*dto.LabelPair, 0, totalLen)
for i, n := range desc.variableLabels { for i, l := range desc.variableLabels.names {
labelPairs = append(labelPairs, &dto.LabelPair{ labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(n), Name: proto.String(l),
Value: proto.String(labelValues[i]), Value: proto.String(labelValues[i]),
}) })
} }

View File

@ -72,6 +72,8 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example. // See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs) h, err := m.hashLabelValues(lvs)
if err != nil { if err != nil {
return false return false
@ -91,6 +93,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See // This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods. // there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool { func (m *MetricVec) Delete(labels Labels) bool {
labels, closer := constrainLabels(m.desc, labels)
defer closer()
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
return false return false
@ -106,6 +111,9 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector. // Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int { func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels, closer := constrainLabels(m.desc, labels)
defer closer()
return m.metricMap.deleteByLabels(labels, m.curry) return m.metricMap.deleteByLabels(labels, m.curry)
} }
@ -144,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
oldCurry = m.curry oldCurry = m.curry
iCurry int iCurry int
) )
for i, label := range m.desc.variableLabels { for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[label] val, ok := labels[labelName]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok { if ok {
return nil, fmt.Errorf("label name %q is already curried", label) return nil, fmt.Errorf("label name %q is already curried", labelName)
} }
newCurry = append(newCurry, oldCurry[iCurry]) newCurry = append(newCurry, oldCurry[iCurry])
iCurry++ iCurry++
@ -156,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok { if !ok {
continue // Label stays uncurried. continue // Label stays uncurried.
} }
newCurry = append(newCurry, curriedLabelValue{i, val}) newCurry = append(newCurry, curriedLabelValue{
i,
m.desc.variableLabels.constrain(labelName, val),
})
} }
} }
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@ -199,6 +210,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// a wrapper around MetricVec, implementing a vector for a specific Metric // a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec. // implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs) h, err := m.hashLabelValues(lvs)
if err != nil { if err != nil {
return nil, err return nil, err
@ -224,6 +236,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation, // around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec. // for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels, closer := constrainLabels(m.desc, labels)
defer closer()
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
return nil, err return nil, err
@ -233,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
} }
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err return 0, err
} }
@ -242,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
curry = m.curry curry = m.curry
iVals, iCurry int iVals, iCurry int
) )
for i := 0; i < len(m.desc.variableLabels); i++ { for i := 0; i < len(m.desc.variableLabels.names); i++ {
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value) h = m.hashAdd(h, curry[iCurry].value)
iCurry++ iCurry++
@ -256,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
} }
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err return 0, err
} }
@ -265,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
curry = m.curry curry = m.curry
iCurry int iCurry int
) )
for i, label := range m.desc.variableLabels { for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[label] val, ok := labels[labelName]
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
if ok { if ok {
return 0, fmt.Errorf("label name %q is already curried", label) return 0, fmt.Errorf("label name %q is already curried", labelName)
} }
h = m.hashAdd(h, curry[iCurry].value) h = m.hashAdd(h, curry[iCurry].value)
iCurry++ iCurry++
} else { } else {
if !ok { if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label) return 0, fmt.Errorf("label name %q missing in label map", labelName)
} }
h = m.hashAdd(h, val) h = m.hashAdd(h, val)
} }
@ -453,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels { for l, v := range labels {
// Check if the target label exists in our metrics and get the index. // Check if the target label exists in our metrics and get the index.
varLabelIndex, validLabel := indexOf(l, desc.variableLabels) varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
if validLabel { if validLabel {
// Check the value of that label against the target value. // Check the value of that label against the target value.
// We don't consider curried values in partial matches. // We don't consider curried values in partial matches.
@ -597,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
return false return false
} }
iCurry := 0 iCurry := 0
for i, k := range desc.variableLabels { for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value { if values[i] != curry[iCurry].value {
return false return false
@ -615,7 +630,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry)) labelValues := make([]string, len(labels)+len(curry))
iCurry := 0 iCurry := 0
for i, k := range desc.variableLabels { for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value labelValues[i] = curry[iCurry].value
iCurry++ iCurry++
@ -640,3 +655,55 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
} }
return labelValues return labelValues
} }
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there are no constraints.
return labels, func() {}
}
constrainedLabels := labelsPool.Get().(Labels)
for l, v := range labels {
constrainedLabels[l] = desc.variableLabels.constrain(l, v)
}
return constrainedLabels, func() {
for k := range constrainedLabels {
delete(constrainedLabels, k)
}
labelsPool.Put(constrainedLabels)
}
}
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there are no constraints.
return lvs
}
constrainedValues := make([]string, len(lvs))
var iCurry, iLVs int
for i := 0; i < len(lvs)+len(curry); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
iCurry++
continue
}
if i < len(desc.variableLabels.names) {
constrainedValues[iLVs] = desc.variableLabels.constrain(
desc.variableLabels.names[i],
lvs[iLVs],
)
} else {
constrainedValues[iLVs] = lvs[iLVs]
}
iLVs++
}
return constrainedValues
}
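The constraint machinery above is driven by the label definitions attached to the Desc. A sketch of the caller's side, assuming the ConstrainedLabels/ConstrainedLabel types that ship alongside UnconstrainedLabels in this client_golang version:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "http_requests_total", // hypothetical metric
			Help: "Requests by method.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			// Every supplied value is normalized before hashing, so "get"
			// and "GET" land on the same series.
			{Name: "method", Constraint: strings.ToUpper},
		},
	})
	requests.WithLabelValues("get").Inc()
	requests.WithLabelValues("GET").Inc() // same series, now at 2
}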

View File

@ -0,0 +1,23 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
type v2 struct{}
// V2 is a struct that can be referenced to access experimental API that might
// be present in v2 of client golang someday. It offers extended functionality
// of v1 with a slightly changed API. It is acceptable to mix pieces from v1,
// e.g. `prometheus.NewGauge`, with pieces from v2, e.g. `prometheus.V2.NewDesc`,
// in the same codebase.
var V2 = v2{}
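A small illustration of the mixing described above, with hypothetical names; note that V2.NewDesc takes ConstrainableLabels (e.g. via UnconstrainedLabels) where v1's NewDesc takes a []string:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// v1 and v2 entry points side by side in the same codebase.
	gauge := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "queue_depth",
		Help: "Current queue depth.",
	})
	desc := prometheus.V2.NewDesc(
		"jobs_total", "Jobs seen, by shard.",
		prometheus.UnconstrainedLabels([]string{"shard"}),
		prometheus.Labels{"app": "demo"},
	)
	_, _ = gauge, desc
}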

View File

@ -17,12 +17,10 @@ import (
"fmt" "fmt"
"sort" "sort"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/client_golang/prometheus/internal"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
) )
// WrapRegistererWith returns a Registerer wrapping the provided // WrapRegistererWith returns a Registerer wrapping the provided
@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels[ln] = lv constLabels[ln] = lv
} }
// NewDesc will do remaining validations. // NewDesc will do remaining validations.
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there was any. This will override any error // Propagate errors if there was any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence. // created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil { if desc.err != nil {

File diff suppressed because it is too large

View File

@ -132,7 +132,10 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
} }
// Pick off one MetricFamily per Decode until there's nothing left. // Pick off one MetricFamily per Decode until there's nothing left.
for key, fam := range d.fams { for key, fam := range d.fams {
*v = *fam v.Name = fam.Name
v.Help = fam.Help
v.Type = fam.Type
v.Metric = fam.Metric
delete(d.fams, key) delete(d.fams, key)
return nil return nil
} }

View File

@ -18,9 +18,9 @@ import (
"io" "io"
"net/http" "net/http"
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"google.golang.org/protobuf/encoding/prototext"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
) )
@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText return FmtText
} }
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
return FmtOpenMetrics if ver == OpenMetricsVersion_1_0_0 {
return FmtOpenMetrics_1_0_0
}
return FmtOpenMetrics_0_0_1
} }
} }
return FmtText return FmtText
@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
case FmtProtoText: case FmtProtoText:
return encoderCloser{ return encoderCloser{
encode: func(v *dto.MetricFamily) error { encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v)) _, err := fmt.Fprintln(w, prototext.Format(v))
return err return err
}, },
close: func() error { return nil }, close: func() error { return nil },
@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
}, },
close: func() error { return nil }, close: func() error { return nil },
} }
case FmtOpenMetrics: case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
return encoderCloser{ return encoderCloser{
encode: func(v *dto.MetricFamily) error { encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v) _, err := MetricFamilyToOpenMetrics(w, v)

View File

@ -19,20 +19,22 @@ type Format string
// Constants to assemble the Content-Type values for the different wire protocols. // Constants to assemble the Content-Type values for the different wire protocols.
const ( const (
TextVersion = "0.0.4" TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf` ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily` ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text` OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion = "0.0.1" OpenMetricsVersion_0_0_1 = "0.0.1"
OpenMetricsVersion_1_0_0 = "1.0.0"
// The Content-Type values for the different wire protocols. // The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>` FmtUnknown Format = `<unknown>`
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text` FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
) )
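A quick sketch of how the split version constants play out in negotiation; the Accept header value is the only input here:

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}
	h.Set("Accept", "application/openmetrics-text; version=1.0.0")
	fmt.Println(expfmt.NegotiateIncludingOpenMetrics(h)) // FmtOpenMetrics_1_0_0

	h.Set("Accept", "application/openmetrics-text") // no version parameter
	fmt.Println(expfmt.NegotiateIncludingOpenMetrics(h)) // FmtOpenMetrics_0_0_1
}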
const ( const (

View File

@ -24,8 +24,8 @@ import (
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
) )
// A stateFn is a function that represents a state in a state machine. By // A stateFn is a function that represents a state in a state machine. By

View File

@ -2,6 +2,7 @@
linters: linters:
enable: enable:
- godot - godot
- misspell
- revive - revive
linter-settings: linter-settings:
@ -10,3 +11,5 @@ linter-settings:
exclude: exclude:
# Ignore "See: URL" # Ignore "See: URL"
- 'See:' - 'See:'
misspell:
locale: US

View File

@ -49,19 +49,19 @@ endif
GOTEST := $(GO) test GOTEST := $(GO) test
GOTEST_DIR := GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),) ifneq ($(CIRCLE_JOB),)
ifneq ($(shell which gotestsum),) ifneq ($(shell command -v gotestsum > /dev/null),)
GOTEST_DIR := test-results GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif endif
endif endif
PROMU_VERSION ?= 0.14.0 PROMU_VERSION ?= 0.15.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT := SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT := GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.49.0 GOLANGCI_LINT_VERSION ?= v1.54.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64 # Only supported on amd64
@ -176,7 +178,7 @@ endif
.PHONY: common-yamllint .PHONY: common-yamllint
common-yamllint: common-yamllint:
@echo ">> running yamllint on all YAML files in the repository" @echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell which yamllint)) ifeq (, $(shell command -v yamllint > /dev/null))
@echo "yamllint not installed so skipping" @echo "yamllint not installed so skipping"
else else
yamllint . yamllint .
@ -205,7 +207,7 @@ common-tarball: promu
.PHONY: common-docker $(BUILD_DOCKER_ARCHS) .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%: $(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \ -f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \ --build-arg ARCH="$*" \
--build-arg OS="linux" \ --build-arg OS="linux" \
@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest .PHONY: common-docker-manifest
common-docker-manifest: common-docker-manifest:
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu .PHONY: promu
promu: $(PROMU) promu: $(PROMU)

View File

@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory
extracting the ttar file using `make fixtures/.unpacked` or just `make test`. extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
```bash ```bash
rm -rf fixtures rm -rf testdata/fixtures
make test make test
``` ```
Next, make the required changes to the extracted files in the `fixtures` directory. When Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using based on the updated `fixtures` directory. And finally, verify the changes using
`git diff fixtures.ttar`. `git diff testdata/fixtures.ttar`.

View File

@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) { func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp")) data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
} }
return parseARPEntries(data) return parseARPEntries(data)
@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth { } else if width == expectedDataWidth {
entry, err := parseARPEntry(columns) entry, err := parseARPEntry(columns)
if err != nil { if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
} }
entries = append(entries, entry) entries = append(entries, entry)
} else { } else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
} }
} }

View File

@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
parts := strings.Fields(line) parts := strings.Fields(line)
if len(parts) < 4 { if len(parts) < 4 {
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
} }
node := strings.TrimRight(parts[1], ",") node := strings.TrimRight(parts[1], ",")
@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
bucketCount = arraySize bucketCount = arraySize
} else { } else {
if bucketCount != arraySize { if bucketCount != arraySize {
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
} }
} }
@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ { for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64) sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
} }
} }
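The rewritten messages across procfs wrap exported sentinel errors. Where the sentinel is wrapped with %w, as in parseBuddyInfo above, callers can branch on the failure class instead of matching message text; a hedged sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	if _, err := fs.BuddyInfo(); err != nil {
		if errors.Is(err, procfs.ErrFileParse) {
			fmt.Println("buddyinfo is malformed:", err)
			return
		}
		fmt.Println("other failure:", err)
	}
}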

View File

@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line // find the first "processor" line
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32) v, err := strconv.ParseUint(field[1], 0, 32)
@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info)) scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") { if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{} cpuinfo := []CPUInfo{}
@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{} cpuinfo := []CPUInfo{}
@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") { if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 { if len(match) < 2 {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
cpu := commonCPUInfo cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32) v, err := strconv.ParseUint(match[1], 0, 32)
@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line // find the first "processor" line
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{} cpuinfo := []CPUInfo{}
@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32) v, err := strconv.ParseUint(field[1], 0, 32)
@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32) v, err := strconv.ParseUint(field[1], 0, 32)

View File

@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto") path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path) b, err := util.ReadFileNoStat(path)
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading crypto %q: %w", path, err) return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
} }
crypto, err := parseCrypto(bytes.NewReader(b)) crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
} }
return crypto, nil return crypto, nil
@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":") kv := strings.Split(text, ":")
if len(kv) != 2 { if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text) return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
} }
k := strings.TrimSpace(kv[0]) k := strings.TrimSpace(kv[0])

View File

@ -20,7 +20,8 @@ import (
// FS represents the pseudo-filesystem sys, which provides an interface to // FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures. // kernel data structures.
type FS struct { type FS struct {
proc fs.FS proc fs.FS
isReal bool
} }
// DefaultMountPoint is the common mount point of the proc filesystem. // DefaultMountPoint is the common mount point of the proc filesystem.
@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
if err != nil { if err != nil {
return FS{}, err return FS{}, err
} }
return FS{fs}, nil
isReal, err := isRealProc(mountPoint)
if err != nil {
return FS{}, err
}
return FS{fs, isReal}, nil
} }

View File

@ -0,0 +1,23 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !freebsd && !linux
// +build !freebsd,!linux
package procfs
// isRealProc always returns true on platforms that don't have a Type field
// in their Statfs_t struct, where the check cannot be performed.
func isRealProc(mountPoint string) (bool, error) {
return true, nil
}

33 vendor/github.com/prometheus/procfs/fs_statfs_type.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build freebsd || linux
// +build freebsd linux
package procfs
import (
"syscall"
)
// isRealProc determines whether the supplied mountpoint is really a proc filesystem.
func isRealProc(mountPoint string) (bool, error) {
stat := syscall.Statfs_t{}
err := syscall.Statfs(mountPoint, &stat)
if err != nil {
return false, err
}
// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
return stat.Type == 0x9fa0, nil
}

View File

@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b)) m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil { if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
} }
return *m, nil return *m, nil
@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error { func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error var err error
if len(fields) < len(setFields) { if len(fields) < len(setFields) {
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
} }
for i := range setFields { for i := range setFields {
@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
for s.Scan() { for s.Scan() {
fields := strings.Fields(s.Text()) fields := strings.Fields(s.Text())
if len(fields) < 2 { if len(fields) < 2 {
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
} }
switch fields[0] { switch fields[0] {

View File

@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
return us, nil return us, nil
} }
// ParseHexUint64s parses uint64 values from the given hexadecimal strings.
func ParseHexUint64s(ss []string) ([]*uint64, error) {
us := make([]*uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return nil, err
}
us = append(us, &u)
}
return us, nil
}
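This helper lives in procfs's internal util package, so it is only callable from within the procfs module; a sketch of its semantics as an in-module test (the hex strings carry no 0x prefix):

package util

import "testing"

func TestParseHexUint64sSketch(t *testing.T) {
	vals, err := ParseHexUint64s([]string{"1f", "ff"})
	if err != nil {
		t.Fatal(err)
	}
	if *vals[0] != 31 || *vals[1] != 255 {
		t.Fatalf("unexpected values: %d, %d", *vals[0], *vals[1])
	}
}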
// ReadUintFromFile reads a file and attempts to parse a uint64 from it. // ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) { func ReadUintFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path) data, err := os.ReadFile(path)

View File

@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46: case 46:
ip = net.ParseIP(s[1:40]) ip = net.ParseIP(s[1:40])
if ip == nil { if ip == nil {
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
} }
default: default:
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
} }
portString := s[len(s)-4:] portString := s[len(s)-4:]
if len(portString) != 4 { if len(portString) != 4 {
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) return nil, 0,
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
} }
port, err := strconv.ParseUint(portString, 16, 16) port, err := strconv.ParseUint(portString, 16, 16)
if err != nil { if err != nil {

View File

@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3) loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes)) parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 { if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
} }
var err error var err error
for i, load := range parts[0:3] { for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64) loads[i], err = strconv.ParseFloat(load, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not parse load %q: %w", load, err) return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
} }
} }
return &LoadAvg{ return &LoadAvg{

View File

@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
} }
mdstat, err := parseMDStat(data) mdstat, err := parseMDStat(data)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
} }
return mdstat, nil return mdstat, nil
} }
@ -90,13 +90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line) deviceFields := strings.Fields(line)
if len(deviceFields) < 3 { if len(deviceFields) < 3 {
return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
} }
mdName := deviceFields[0] // mdx mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive state := deviceFields[2] // active or inactive
if len(lines) <= i+3 { if len(lines) <= i+3 {
return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
} }
// Failed disks have the suffix (F) & Spare disks have the suffix (S). // Failed disks have the suffix (F) & Spare disks have the suffix (S).
@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %w", err) return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
} }
syncLineIdx := i + 2 syncLineIdx := i + 2
@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else { } else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
} }
} }
} }
@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine) statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 { if len(statusFields) < 1 {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
} }
sizeStr := statusFields[0] sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64) size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
} }
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine) matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 { if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
} }
total, err = strconv.ParseInt(matches[2], 10, 64) total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
} }
active, err = strconv.ParseInt(matches[3], 10, 64) active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
} }
down = int64(strings.Count(matches[4], "_")) down = int64(strings.Count(matches[4], "_"))
@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
} }
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
} }
// Get percentage complete // Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
} }
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil { if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
} }
// Get time expected left to complete // Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
} }
finish, err = strconv.ParseFloat(matches[1], 64) finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil { if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
} }
// Get recovery speed // Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
} }
speed, err = strconv.ParseFloat(matches[1], 64) speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil { if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
} }
return syncedBlocks, pct, finish, speed, nil return syncedBlocks, pct, finish, speed, nil

View File

@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b)) m, err := parseMemInfo(bytes.NewReader(b))
if err != nil { if err != nil {
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
} }
return *m, nil return *m, nil
@ -165,7 +165,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
// Each line has at least a name and value; we ignore the unit. // Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text()) fields := strings.Fields(s.Text())
if len(fields) < 2 { if len(fields) < 2 {
return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
} }
v, err := strconv.ParseUint(fields[1], 0, 64) v, err := strconv.ParseUint(fields[1], 0, 64)

View File

@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mountInfo := strings.Split(mountString, " ") mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo) mountInfoLength := len(mountInfo)
if mountInfoLength < 10 { if mountInfoLength < 10 {
return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
} }
if mountInfo[mountInfoLength-4] != "-" { if mountInfo[mountInfoLength-4] != "-" {
return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
} }
mount := &MountInfo{ mount := &MountInfo{
@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mount.MountID, err = strconv.Atoi(mountInfo[0]) mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse mount ID") return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
} }
mount.ParentID, err = strconv.Atoi(mountInfo[1]) mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse parent ID") return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
} }
// Has optional fields, which is a space separated list of values. // Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7 // Example: shared:2 master:7
if mountInfo[6] != "" { if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
} }
} }
return mount, nil return mount, nil


@ -44,6 +44,14 @@ const (
fieldTransport11TCPLen = 13 fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10 fieldTransport11UDPLen = 10
// kernel version >= 4.14 MaxLen
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
fieldTransport11RDMAMaxLen = 28
// kernel version <= 4.2 MinLen
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
fieldTransport11RDMAMinLen = 20
) )
// A Mount is a device mount parsed from /proc/[pid]/mountstats. // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@ -186,6 +194,8 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64 CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled. // Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64 CumulativeTotalRequestMilliseconds uint64
// The average time from the point the client sends RPC requests until it receives the response.
AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64 Errors uint64
} }
@ -231,6 +241,33 @@ type NFSTransportStats struct {
// A running counter, incremented on each request as the current size of the // A running counter, incremented on each request as the current size of the
// pending queue. // pending queue.
CumulativePendingQueue uint64 CumulativePendingQueue uint64
// Stats below only available with stat version 1.1.
// Transport over RDMA
// accessed when sending a call
ReadChunkCount uint64
WriteChunkCount uint64
ReplyChunkCount uint64
TotalRdmaRequest uint64
// rarely accessed error counters
PullupCopyCount uint64
HardwayRegisterCount uint64
FailedMarshalCount uint64
BadReplyCount uint64
MrsRecovered uint64
MrsOrphaned uint64
MrsAllocated uint64
EmptySendctxQ uint64
// accessed when receiving a reply
TotalRdmaReply uint64
FixupCopyCount uint64
ReplyWaitsForSend uint64
LocalInvNeeded uint64
NomsgCallCount uint64
BcallCount uint64
} }
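These RDMA counters surface through the existing MountStats API. A sketch of reading them for the current process (hypothetical caller, assuming the vendored procfs API):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok || nfs.Transport.Protocol != "rdma" {
			continue // only NFS mounts using the rdma transport
		}
		fmt.Printf("%s: read/write/reply chunks = %d/%d/%d\n",
			m.Device, nfs.Transport.ReadChunkCount,
			nfs.Transport.WriteChunkCount, nfs.Transport.ReplyChunkCount)
	}
}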
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@ -264,7 +301,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
if len(ss) > deviceEntryLen { if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics // Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type { if m.Type != nfs3Type && m.Type != nfs4Type {
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
} }
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
@ -288,7 +325,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
// device [device] mounted on [mount] with fstype [type] // device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) { func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen { if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss) return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
} }
// Check for specific words appearing at specific indices to ensure // Check for specific words appearing at specific indices to ensure
@ -306,7 +343,7 @@ func parseMount(ss []string) (*Mount, error) {
for _, f := range format { for _, f := range format {
if ss[f.i] != f.s { if ss[f.i] != f.s {
return nil, fmt.Errorf("invalid device entry: %v", ss) return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
} }
} }
@ -343,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
switch ss[0] { switch ss[0] {
case fieldOpts: case fieldOpts:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
} }
if stats.Opts == nil { if stats.Opts == nil {
stats.Opts = map[string]string{} stats.Opts = map[string]string{}
@ -358,7 +395,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
} }
case fieldAge: case fieldAge:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
} }
// Age integer is in seconds // Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s") d, err := time.ParseDuration(ss[1] + "s")
@ -369,7 +406,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d stats.Age = d
case fieldBytes: case fieldBytes:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
} }
bstats, err := parseNFSBytesStats(ss[1:]) bstats, err := parseNFSBytesStats(ss[1:])
if err != nil { if err != nil {
@ -379,7 +416,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats stats.Bytes = *bstats
case fieldEvents: case fieldEvents:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
} }
estats, err := parseNFSEventsStats(ss[1:]) estats, err := parseNFSEventsStats(ss[1:])
if err != nil { if err != nil {
@ -389,7 +426,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Events = *estats stats.Events = *estats
case fieldTransport: case fieldTransport:
if len(ss) < 3 { if len(ss) < 3 {
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
} }
tstats, err := parseNFSTransportStats(ss[1:], statVersion) tstats, err := parseNFSTransportStats(ss[1:], statVersion)
@ -428,7 +465,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
// integer fields. // integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen { if len(ss) != fieldBytesLen {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
} }
ns := make([]uint64, 0, fieldBytesLen) ns := make([]uint64, 0, fieldBytesLen)
@ -457,7 +494,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
// integer fields. // integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen { if len(ss) != fieldEventsLen {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss) return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
} }
ns := make([]uint64, 0, fieldEventsLen) ns := make([]uint64, 0, fieldEventsLen)
@ -521,7 +558,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
} }
if len(ss) < minFields { if len(ss) < minFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
} }
// Skip string operation name for integers // Skip string operation name for integers
@ -534,7 +571,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n) ns = append(ns, n)
} }
opStats := NFSOperationStats{ opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"), Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0], Requests: ns[0],
@ -546,6 +582,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7], CumulativeTotalRequestMilliseconds: ns[7],
} }
if ns[0] != 0 {
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
}
if len(ns) > 8 { if len(ns) > 8 {
opStats.Errors = ns[8] opStats.Errors = ns[8]
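The new AverageRTTMilliseconds field is derived rather than read from the file; the ns[0] check above guards the division for operations that were never issued. The computation in isolation:

// Mirrors the derivation above: cumulative total response time divided
// by request count, guarded so idle operations report 0 instead of Inf/NaN.
func averageRTT(cumulativeTotalResponseMs, requests uint64) float64 {
	if requests == 0 {
		return 0
	}
	return float64(cumulativeTotalResponseMs) / float64(requests)
}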
@ -572,10 +611,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
} else if protocol == "udp" { } else if protocol == "udp" {
expectedLength = fieldTransport10UDPLen expectedLength = fieldTransport10UDPLen
} else { } else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
} }
if len(ss) != expectedLength { if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
} }
case statVersion11: case statVersion11:
var expectedLength int var expectedLength int
@ -583,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
expectedLength = fieldTransport11TCPLen expectedLength = fieldTransport11TCPLen
} else if protocol == "udp" { } else if protocol == "udp" {
expectedLength = fieldTransport11UDPLen expectedLength = fieldTransport11UDPLen
} else if protocol == "rdma" {
expectedLength = fieldTransport11RDMAMinLen
} else { } else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
} }
// old: if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) }
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
(protocol == "rdma" && len(ss) < expectedLength) {
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
} }
default: default:
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
} }
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@ -600,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// Note: slice length must be set to length of v1.1 stats to avoid a panic when // Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present. // only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571. // See: https://github.com/prometheus/node_exporter/issues/571.
// old: ns := make([]uint64, fieldTransport11TCPLen)
//
// Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
for i, s := range ss { for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64) n, err := strconv.ParseUint(s, 10, 64)
if err != nil { if err != nil {
@ -618,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// we set them to 0 here. // we set them to 0 here.
if protocol == "udp" { if protocol == "udp" {
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
} else if protocol == "tcp" {
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
} else if protocol == "rdma" {
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
} }
return &NFSTransportStats{ return &NFSTransportStats{
// NFS xprt over tcp or udp
Protocol: protocol, Protocol: protocol,
Port: ns[0], Port: ns[0],
Bind: ns[1], Bind: ns[1],
@ -632,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
BadTransactionIDs: ns[7], BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8], CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9], CumulativeBacklog: ns[9],
// NFS xprt over tcp or udp
// And statVersion 1.1
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
// NFS xprt over rdma
// And stat Version 1.1
ReadChunkCount: ns[13],
WriteChunkCount: ns[14],
ReplyChunkCount: ns[15],
TotalRdmaRequest: ns[16],
PullupCopyCount: ns[17],
HardwayRegisterCount: ns[18],
FailedMarshalCount: ns[19],
BadReplyCount: ns[20],
MrsRecovered: ns[21],
MrsOrphaned: ns[22],
MrsAllocated: ns[23],
EmptySendctxQ: ns[24],
TotalRdmaReply: ns[25],
FixupCopyCount: ns[26],
ReplyWaitsForSend: ns[27],
LocalInvNeeded: ns[28],
NomsgCallCount: ns[29],
BcallCount: ns[30],
}, nil }, nil
} }
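The three append splices above exist to map tcp, udp and rdma statements onto a single index space, so that ns[13:] always means the RDMA block regardless of transport. A toy illustration of the udp splice (values are made up):

package main

import "fmt"

func main() {
	// Mirrors the udp splice above: v1.0 udp statements lack the three
	// connect-related columns, so zeros are inserted after index 1 to
	// realign later fields.
	ns := []uint64{10, 11, 12, 13}
	ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
	fmt.Println(ns) // [10 11 0 0 0 12 13]
}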


@ -18,7 +18,6 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"strconv"
"strings" "strings"
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
@ -28,9 +27,13 @@ import (
// and contains netfilter conntrack statistics at one CPU core. // and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct { type ConntrackStatEntry struct {
Entries uint64 Entries uint64
Searched uint64
Found uint64 Found uint64
New uint64
Invalid uint64 Invalid uint64
Ignore uint64 Ignore uint64
Delete uint64
DeleteList uint64
Insert uint64 Insert uint64
InsertFailed uint64 InsertFailed uint64
Drop uint64 Drop uint64
@ -55,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b)) stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
} }
return stat, nil return stat, nil
@ -81,73 +84,35 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
// Parses a ConntrackStatEntry from given array of fields.
// (Rewritten: the previous version required exactly 17 fields and parsed
// each one by hand through a parseConntrackStatField hex helper, both of
// which are deleted along with the "strconv" import above.)
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
entries, err := util.ParseHexUint64s(fields)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
numEntries := len(entries)
if numEntries < 16 || numEntries > 17 {
return nil,
fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
}
stats := &ConntrackStatEntry{
Entries: *entries[0],
Searched: *entries[1],
Found: *entries[2],
New: *entries[3],
Invalid: *entries[4],
Ignore: *entries[5],
Delete: *entries[6],
DeleteList: *entries[7],
Insert: *entries[8],
InsertFailed: *entries[9],
Drop: *entries[10],
EarlyDrop: *entries[11],
}
// Ignore missing search_restart on Linux < 2.6.35.
if numEntries == 17 {
stats.SearchRestart = *entries[16]
}
return stats, nil
}
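The rewritten entry parser is consumed through FS.ConntrackStat; a minimal caller sketch (hypothetical, assuming the vendored procfs API):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	entries, err := fs.ConntrackStat()
	if err != nil {
		panic(err)
	}
	// One entry per CPU; Searched and DeleteList are among the newly
	// exported fields.
	for i, e := range entries {
		fmt.Printf("cpu%d: entries=%d searched=%d delete_list=%d\n",
			i, e.Entries, e.Searched, e.DeleteList)
	}
}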


@ -130,7 +130,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte var byteIP []byte
byteIP, err := hex.DecodeString(hexIP) byteIP, err := hex.DecodeString(hexIP)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
} }
switch len(byteIP) { switch len(byteIP) {
case 4: case 4:
@ -144,7 +144,7 @@ func parseIP(hexIP string) (net.IP, error) {
} }
return i, nil return i, nil
default: default:
return nil, fmt.Errorf("Unable to parse IP %s", hexIP) return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
} }
} }
@ -153,7 +153,8 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{} line := &netIPSocketLine{}
if len(fields) < 10 { if len(fields) < 10 {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 10 columns %q", "%w: Less than 10 columns found %q",
ErrFileParse,
strings.Join(fields, " "), strings.Join(fields, " "),
) )
} }
@ -162,64 +163,65 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
// sl // sl
s := strings.Split(fields[0], ":") s := strings.Split(fields[0], ":")
if len(s) != 2 { if len(s) != 2 {
return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
} }
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
} }
// local_address // local_address
l := strings.Split(fields[1], ":") l := strings.Split(fields[1], ":")
if len(l) != 2 { if len(l) != 2 {
return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
} }
if line.LocalAddr, err = parseIP(l[0]); err != nil { if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err return nil, err
} }
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
} }
// remote_address // remote_address
r := strings.Split(fields[2], ":") r := strings.Split(fields[2], ":")
if len(r) != 2 { if len(r) != 2 {
return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
} }
if line.RemAddr, err = parseIP(r[0]); err != nil { if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err return nil, err
} }
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
} }
// st // st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
} }
// tx_queue and rx_queue // tx_queue and rx_queue
q := strings.Split(fields[4], ":") q := strings.Split(fields[4], ":")
if len(q) != 2 { if len(q) != 2 {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"cannot parse tx/rx queues in socket line as it has a missing colon %q", "%w: Missing colon for tx/rx queues in socket line %q",
ErrFileParse,
fields[4], fields[4],
) )
} }
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
} }
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
} }
// uid // uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
} }
// inode // inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
} }
return line, nil return line, nil


@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
} else if fields[6] == disabled { } else if fields[6] == disabled {
line.Slab = false line.Slab = false
} else { } else {
return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
} }
line.ModuleName = fields[7] line.ModuleName = fields[7]
@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
} else if capabilities[i] == "n" { } else if capabilities[i] == "n" {
*capabilityFields[i] = false *capabilityFields[i] = false
} else { } else {
return fmt.Errorf("unable to parse capability block for protocol: position %d", i) return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
} }
} }
return nil return nil

143 vendor/github.com/prometheus/procfs/net_route.go generated vendored Normal file

@ -0,0 +1,143 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
const (
blackholeRepresentation string = "*"
blackholeIfaceName string = "blackhole"
routeLineColumns int = 11
)
// A NetRouteLine represents one line from net/route.
type NetRouteLine struct {
Iface string
Destination uint32
Gateway uint32
Flags uint32
RefCnt uint32
Use uint32
Metric uint32
Mask uint32
MTU uint32
Window uint32
IRTT uint32
}
func (fs FS) NetRoute() ([]NetRouteLine, error) {
return readNetRoute(fs.proc.Path("net", "route"))
}
func readNetRoute(path string) ([]NetRouteLine, error) {
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, err
}
routelines, err := parseNetRoute(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
}
return routelines, nil
}
func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
var routelines []NetRouteLine
scanner := bufio.NewScanner(r)
scanner.Scan()
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
routeline, err := parseNetRouteLine(fields)
if err != nil {
return nil, err
}
routelines = append(routelines, *routeline)
}
return routelines, nil
}
func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
if len(fields) != routeLineColumns {
return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
}
iface := fields[0]
if iface == blackholeRepresentation {
iface = blackholeIfaceName
}
destination, err := strconv.ParseUint(fields[1], 16, 32)
if err != nil {
return nil, err
}
gateway, err := strconv.ParseUint(fields[2], 16, 32)
if err != nil {
return nil, err
}
flags, err := strconv.ParseUint(fields[3], 10, 32)
if err != nil {
return nil, err
}
refcnt, err := strconv.ParseUint(fields[4], 10, 32)
if err != nil {
return nil, err
}
use, err := strconv.ParseUint(fields[5], 10, 32)
if err != nil {
return nil, err
}
metric, err := strconv.ParseUint(fields[6], 10, 32)
if err != nil {
return nil, err
}
mask, err := strconv.ParseUint(fields[7], 16, 32)
if err != nil {
return nil, err
}
mtu, err := strconv.ParseUint(fields[8], 10, 32)
if err != nil {
return nil, err
}
window, err := strconv.ParseUint(fields[9], 10, 32)
if err != nil {
return nil, err
}
irtt, err := strconv.ParseUint(fields[10], 10, 32)
if err != nil {
return nil, err
}
routeline := &NetRouteLine{
Iface: iface,
Destination: uint32(destination),
Gateway: uint32(gateway),
Flags: uint32(flags),
RefCnt: uint32(refcnt),
Use: uint32(use),
Metric: uint32(metric),
Mask: uint32(mask),
MTU: uint32(mtu),
Window: uint32(window),
IRTT: uint32(irtt),
}
return routeline, nil
}
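A short usage sketch for the new API; matching a Destination and Mask of zero is the conventional way to spot the default route in /proc/net/route:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	routes, err := fs.NetRoute()
	if err != nil {
		panic(err)
	}
	for _, r := range routes {
		if r.Destination == 0 && r.Mask == 0 {
			fmt.Printf("default route via %s (metric %d)\n", r.Iface, r.Metric)
		}
	}
}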


@ -16,7 +16,6 @@ package procfs
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
"strings" "strings"
@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b)) stat, err := parseSockstat(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
} }
return stat, nil return stat, nil
@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// Expect a minimum of a protocol and one key/value pair. // Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ") fields := strings.Split(s.Text(), " ")
if len(fields) < 3 { if len(fields) < 3 {
return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
} }
// The remaining fields are key/value pairs. // The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:]) kvs, err := parseSockstatKVs(fields[1:])
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
} }
// The first field is the protocol. We must trim its colon suffix. // The first field is the protocol. We must trim its colon suffix.
@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// parseSockstatKVs parses a string slice into a map of key/value pairs. // parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) { func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 { if len(kvs)%2 != 0 {
return nil, errors.New("odd number of fields in key/value pairs") return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
} }
// Iterate two values at a time to gather key/value pairs. // Iterate two values at a time to gather key/value pairs.


@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b)) entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
} }
return entries, nil return entries, nil
@ -76,13 +76,14 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
s := bufio.NewScanner(r) s := bufio.NewScanner(r)
var stats []SoftnetStat var stats []SoftnetStat
cpuIndex := 0
for s.Scan() { for s.Scan() {
columns := strings.Fields(s.Text()) columns := strings.Fields(s.Text())
width := len(columns) width := len(columns)
softnetStat := SoftnetStat{} softnetStat := SoftnetStat{}
if width < minColumns { if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
} }
// Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
softnetStat.SoftnetBacklogLen = us[0] softnetStat.SoftnetBacklogLen = us[0]
softnetStat.Index = us[1] softnetStat.Index = us[1]
} else {
// For older kernels, create the Index based on the scan line number.
softnetStat.Index = uint32(cpuIndex)
} }
softnetStat.Width = width softnetStat.Width = width
stats = append(stats, softnetStat) stats = append(stats, softnetStat)
cpuIndex++
} }
return stats, nil return stats, nil
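With this fallback, Index is populated on old and new kernels alike. A brief consumer sketch (assuming the vendored procfs API):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		panic(err)
	}
	for _, s := range stats {
		// Index comes from the softnet_stat column on recent kernels,
		// or from the line number on older ones.
		fmt.Printf("cpu%d: processed=%d dropped=%d\n", s.Index, s.Processed, s.Dropped)
	}
}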


@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text() line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields) item, err := nu.parseLine(line, hasInode, minFields)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
} }
nu.Rows = append(nu.Rows, item) nu.Rows = append(nu.Rows, item)
} }
if err := s.Err(); err != nil { if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
} }
return &nu, nil return &nu, nil
@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
l := len(fields) l := len(fields)
if l < min { if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
} }
// Field offsets are as follows: // Field offsets are as follows:
@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1]) users, err := u.parseUsers(fields[1])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
} }
flags, err := u.parseFlags(fields[3]) flags, err := u.parseFlags(fields[3])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
} }
typ, err := u.parseType(fields[4]) typ, err := u.parseType(fields[4])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
} }
state, err := u.parseState(fields[5]) state, err := u.parseState(fields[5])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
} }
var inode uint64 var inode uint64
if hasInode { if hasInode {
inode, err = u.parseInode(fields[6]) inode, err = u.parseInode(fields[6])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
} }
} }

182 vendor/github.com/prometheus/procfs/net_wireless.go generated vendored Normal file

@ -0,0 +1,182 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Wireless models the content of /proc/net/wireless.
type Wireless struct {
Name string
// Status is the current 4-digit hex value status of the interface.
Status uint64
// QualityLink is the link quality.
QualityLink int
// QualityLevel is the signal gain (dBm).
QualityLevel int
// QualityNoise is the signal noise baseline (dBm).
QualityNoise int
// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
DiscardedNwid int
// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
DiscardedCrypt int
// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
DiscardedFrag int
// DiscardedRetry is the number of discarded packets that reached max MAC retries.
DiscardedRetry int
// DiscardedMisc is the number of discarded packets for other reasons.
DiscardedMisc int
// MissedBeacon is the number of missed beacons/superframe.
MissedBeacon int
}
// Wireless returns kernel wireless statistics.
func (fs FS) Wireless() ([]*Wireless, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
if err != nil {
return nil, err
}
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
}
return m, nil
}
// parseWireless parses the contents of /proc/net/wireless.
/*
Inter-| sta-| Quality | Discarded packets | Missed | WE
face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
eth1: 0000 5. -256. -10. 0 1 0 3 0 0
eth2: 0000 5. -256. -20. 0 2 0 4 0 0
*/
func parseWireless(r io.Reader) ([]*Wireless, error) {
var (
interfaces []*Wireless
scanner = bufio.NewScanner(r)
)
for n := 0; scanner.Scan(); n++ {
// Skip the 2 header lines.
if n < 2 {
continue
}
line := scanner.Text()
parts := strings.Split(line, ":")
if len(parts) != 2 {
return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
}
name := strings.TrimSpace(parts[0])
stats := strings.Fields(parts[1])
if len(stats) < 10 {
return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
}
status, err := strconv.ParseUint(stats[0], 16, 16)
if err != nil {
return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
}
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
}
w := &Wireless{
Name: name,
Status: status,
QualityLink: qlink,
QualityLevel: qlevel,
QualityNoise: qnoise,
DiscardedNwid: dnwid,
DiscardedCrypt: dcrypt,
DiscardedFrag: dfrag,
DiscardedRetry: dretry,
DiscardedMisc: dmisc,
MissedBeacon: mbeacon,
}
interfaces = append(interfaces, w)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
}
return interfaces, nil
}
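A usage sketch for the new API (on a machine without wireless interfaces the returned slice is simply empty):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		panic(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%d dBm noise=%d dBm missed beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}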


@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text()) fields := strings.Fields(s.Text())
if len(fields) != 2 { if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
} }
name := fields[0] name := fields[0]


@ -15,7 +15,6 @@ package procfs
import ( import (
"bufio" "bufio"
"io"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -38,12 +37,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
var netStatsTotal []NetStat var netStatsTotal []NetStat
for _, filePath := range statFiles { for _, filePath := range statFiles {
// old: file, err := os.Open(filePath) ... procNetstat, err := parseNetstat(file)
procNetstat, err := parseNetstat(filePath)
if err != nil {
return nil, err
}
@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) {
// parseNetstat parses the metrics from `/proc/net/stat/` file // parseNetstat parses the metrics from `/proc/net/stat/` file
// and returns a NetStat structure. // and returns a NetStat structure.
// old: func parseNetstat(r io.Reader) (NetStat, error) scanned a caller-supplied reader.
func parseNetstat(filePath string) (NetStat, error) {
netStat := NetStat{
Stats: make(map[string][]uint64),
}
file, err := os.Open(filePath)
if err != nil {
return netStat, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
scanner.Scan()
// First string is always a header for stats // First string is always a header for stats


@ -15,13 +15,13 @@ package procfs
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
"strconv" "strconv"
"strings" "strings"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
) )
@ -30,12 +30,18 @@ type Proc struct {
// The process ID. // The process ID.
PID int PID int
fs fs.FS fs FS
} }
// Procs represents a list of Proc structs. // Procs represents a list of Proc structs.
type Procs []Proc type Procs []Proc
var (
ErrFileParse = errors.New("Error Parsing File")
ErrFileRead = errors.New("Error Reading File")
ErrMountPoint = errors.New("Error Accessing Mount point")
)
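One caveat worth noting: the hunks above prefix these sentinels sometimes with %w and sometimes with %s, and only %w participates in errors.Is matching, as this snippet demonstrates:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	wrapped := fmt.Errorf("%w: malformed line", procfs.ErrFileParse)   // wraps
	formatted := fmt.Errorf("%s: malformed line", procfs.ErrFileParse) // formats only
	fmt.Println(errors.Is(wrapped, procfs.ErrFileParse))   // true
	fmt.Println(errors.Is(formatted, procfs.ErrFileParse)) // false
}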
func (p Procs) Len() int { return len(p) } func (p Procs) Len() int { return len(p) }
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
@ -43,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
// Self returns a process for the current process read via /proc/self. // Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) { func Self() (Proc, error) {
fs, err := NewFS(DefaultMountPoint) fs, err := NewFS(DefaultMountPoint)
if err != nil { if err != nil || errors.Unwrap(err) == ErrMountPoint {
return Proc{}, err return Proc{}, err
} }
return fs.Self() return fs.Self()
@ -92,7 +98,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err return Proc{}, err
} }
return Proc{PID: pid, fs: fs.proc}, nil return Proc{PID: pid, fs: fs}, nil
} }
// AllProcs returns a list of all currently available processes. // AllProcs returns a list of all currently available processes.
@ -105,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1) names, err := d.Readdirnames(-1)
if err != nil { if err != nil {
return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
} }
p := Procs{} p := Procs{}
@ -114,7 +120,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil { if err != nil {
continue continue
} }
p = append(p, Proc{PID: int(pid), fs: fs.proc}) p = append(p, Proc{PID: int(pid), fs: fs})
} }
return p, nil return p, nil
@ -206,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names { for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32) fd, err := strconv.ParseInt(n, 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not parse fd %q: %w", n, err) return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
} }
fds[i] = uintptr(fd) fds[i] = uintptr(fd)
} }
@ -237,6 +243,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// FileDescriptorsLen returns the number of currently open file descriptors of // FileDescriptorsLen returns the number of currently open file descriptors of
// a process. // a process.
func (p Proc) FileDescriptorsLen() (int, error) { func (p Proc) FileDescriptorsLen() (int, error) {
// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
if p.fs.isReal {
stat, err := os.Stat(p.path("fd"))
if err != nil {
return 0, err
}
size := stat.Size()
if size > 0 {
return int(size), nil
}
}
fds, err := p.fileDescriptors() fds, err := p.fileDescriptors()
if err != nil { if err != nil {
return 0, err return 0, err
@ -278,14 +297,14 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1) names, err := d.Readdirnames(-1)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
} }
return names, nil return names, nil
} }
func (p Proc) path(pa ...string) string { func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
} }
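The FileDescriptorsLen fast path above relies on a Linux 6.2 kernel change that reports the open-descriptor count as the size of /proc/<pid>/fd; the p.fs.isReal guard presumably keeps the stat probe off fixture filesystems used in tests. A standalone sketch of the same probe:

package main

import (
	"fmt"
	"os"
)

func main() {
	// On Linux >= 6.2 the stat size of /proc/<pid>/fd is the number of
	// open descriptors; on older kernels it is 0, so callers must fall
	// back to listing the directory.
	info, err := os.Stat("/proc/self/fd")
	if err != nil {
		panic(err)
	}
	if n := info.Size(); n > 0 {
		fmt.Println("open file descriptors:", n)
	} else {
		fmt.Println("fast path unavailable; fall back to Readdirnames")
	}
}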
// FileDescriptorsInfo retrieves information about all file descriptors of // FileDescriptorsInfo retrieves information about all file descriptors of


@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
fields := strings.SplitN(cgroupStr, ":", 3) fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 { if len(fields) < 3 {
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
} }
cgroup := &Cgroup{ cgroup := &Cgroup{
@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
} }
cgroup.HierarchyID, err = strconv.Atoi(fields[0]) cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID") return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
} }
if fields[1] != "" { if fields[1] != "" {
ssNames := strings.Split(fields[1], ",") ssNames := strings.Split(fields[1], ",")


@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
fields := strings.Fields(CgroupSummaryStr) fields := strings.Fields(CgroupSummaryStr)
// require at least 4 fields // require at least 4 fields
if len(fields) < 4 { if len(fields) < 4 {
return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
} }
CgroupSummary := &CgroupSummary{ CgroupSummary := &CgroupSummary{
@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
} }
CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID") return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
} }
CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Cgroup Num") return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
} }
CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Enabled") return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
} }
return CgroupSummary, nil return CgroupSummary, nil
} }


@ -26,6 +26,7 @@ var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`) rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
) )
@ -40,6 +41,8 @@ type ProcFDInfo struct {
Flags string Flags string
// Mount point ID // Mount point ID
MntID string MntID string
// Inode number
Ino string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo InotifyInfos []InotifyInfo
} }
@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
return nil, err return nil, err
} }
var text, pos, flags, mntid string var text, pos, flags, mntid, ino string
var inotify []InotifyInfo var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data)) scanner := bufio.NewScanner(bytes.NewReader(data))
@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
flags = rFlags.FindStringSubmatch(text)[1] flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) { } else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1] mntid = rMntID.FindStringSubmatch(text)[1]
} else if rIno.MatchString(text) {
ino = rIno.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) { } else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text) newInotify, err := parseInotifyInfo(text)
if err != nil { if err != nil {
@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
Pos: pos, Pos: pos,
Flags: flags, Flags: flags,
MntID: mntid, MntID: mntid,
Ino: ino,
InotifyInfos: inotify, InotifyInfos: inotify,
} }
@ -111,7 +117,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
} }
return i, nil return i, nil
} }
return nil, fmt.Errorf("invalid inode entry: %q", line) return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
} }
// ProcFDInfos represents a list of ProcFDInfo structs. // ProcFDInfos represents a list of ProcFDInfo structs.


@ -66,7 +66,7 @@ func parseInterrupts(r io.Reader) (Interrupts, error) {
continue continue
} }
if len(parts) < 2 { if len(parts) < 2 {
return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
} }
intName := parts[0][:len(parts[0])-1] // remove trailing : intName := parts[0][:len(parts[0])-1] // remove trailing :


@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) {
//fields := limitsMatch.Split(s.Text(), limitsFields) //fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text()) fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields { if len(fields) != limitsFields {
return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
} }
switch fields[1] { switch fields[1] {
@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
} }
i, err := strconv.ParseUint(s, 10, 64) i, err := strconv.ParseUint(s, 10, 64)
if err != nil { if err != nil {
return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
} }
return i, nil return i, nil
} }


@ -63,17 +63,17 @@ type ProcMap struct {
// parseDevice parses the device token of a line and converts it to a dev_t // parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure. // (mkdev) like structure.
func parseDevice(s string) (uint64, error) { func parseDevice(s string) (uint64, error) {
toks := strings.Split(s, ":") i := strings.Index(s, ":")
if len(toks) < 2 { if i == -1 {
return 0, fmt.Errorf("unexpected number of fields") return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
} }
major, err := strconv.ParseUint(toks[0], 16, 0) major, err := strconv.ParseUint(s[0:i], 16, 0)
if err != nil { if err != nil {
return 0, err return 0, err
} }
minor, err := strconv.ParseUint(toks[1], 16, 0) minor, err := strconv.ParseUint(s[i+1:], 16, 0)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) {
// parseAddresses parses the start-end address. // parseAddresses parses the start-end address.
func parseAddresses(s string) (uintptr, uintptr, error) { func parseAddresses(s string) (uintptr, uintptr, error) {
toks := strings.Split(s, "-") idx := strings.Index(s, "-")
if len(toks) < 2 { if idx == -1 {
return 0, 0, fmt.Errorf("invalid address") return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
} }
saddr, err := parseAddress(toks[0]) saddr, err := parseAddress(s[0:idx])
if err != nil { if err != nil {
return 0, 0, err return 0, 0, err
} }
eaddr, err := parseAddress(toks[1]) eaddr, err := parseAddress(s[idx+1:])
if err != nil { if err != nil {
return 0, 0, err return 0, 0, err
} }
@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) {
// parsePermissions parses a token and returns any that are set. // parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) { func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 { if len(s) < 4 {
return nil, fmt.Errorf("invalid permissions token") return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
} }
perms := ProcMapPermissions{} perms := ProcMapPermissions{}
@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) {
func parseProcMap(text string) (*ProcMap, error) { func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text) fields := strings.Fields(text)
if len(fields) < 5 { if len(fields) < 5 {
return nil, fmt.Errorf("truncated procmap entry") return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
} }
saddr, eaddr, err := parseAddresses(fields[0]) saddr, eaddr, err := parseAddresses(fields[0])
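parseDevice and parseAddresses now use a single strings.Index scan instead of strings.Split, avoiding an intermediate []string allocation per maps line. The same technique on a sample device token:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	s := "08:01" // sample /proc/<pid>/maps device token (major:minor, hex)
	i := strings.Index(s, ":")
	if i == -1 {
		panic("expected separator `:`")
	}
	major, err := strconv.ParseUint(s[:i], 16, 0)
	if err != nil {
		panic(err)
	}
	minor, err := strconv.ParseUint(s[i+1:], 16, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("major=%d minor=%d\n", major, minor) // major=8 minor=1
}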


@ -195,8 +195,8 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
// Remove trailing :. // Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":") protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) { if len(nameParts) != len(valueParts) {
return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
fileName, protocol) ErrFileParse, fileName, protocol)
} }
for i := 1; i < len(nameParts); i++ { for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64) value, err := strconv.ParseFloat(valueParts[i], 64)


@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1) names, err := d.Readdirnames(-1)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
} }
ns := make(Namespaces, len(names)) ns := make(Namespaces, len(names))
@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2) fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 { if len(fields) != 2 {
return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
} }
typ := fields[0] typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
} }
ns[name] = Namespace{typ, uint32(inode)} ns[name] = Namespace{typ, uint32(inode)}
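A /proc/[pid]/ns entry is a symlink whose target has the form `type:[inode]`, e.g. `pid:[4026531836]`; the parse above splits on the first `:` and trims the brackets. A tiny self-contained sketch:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Typical readlink result for /proc/self/ns/pid.
	target := "pid:[4026531836]"
	fields := strings.SplitN(target, ":", 2)
	typ := fields[0]
	inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
	fmt.Println(typ, inode, err) // pid 4026531836 <nil>
}
```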

View File

@ -61,14 +61,14 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil { if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
} }
return parsePSIStats(resource, bytes.NewReader(data)) return parsePSIStats(bytes.NewReader(data))
} }
// parsePSIStats parses the specified file for pressure stall information. // parsePSIStats parses the specified file for pressure stall information.
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { func parsePSIStats(r io.Reader) (PSIStats, error) {
psiStats := PSIStats{} psiStats := PSIStats{}
scanner := bufio.NewScanner(r) scanner := bufio.NewScanner(r)

View File

@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
} }
vBytes := vKBytes * 1024 vBytes := vKBytes * 1024
s.addValue(k, v, vKBytes, vBytes) s.addValue(k, vBytes)
return nil return nil
} }
func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
switch k { switch k {
case "Rss": case "Rss":
s.Rss += vUintBytes s.Rss += vUintBytes

View File

@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
// Remove trailing :. // Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":") protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) { if len(nameParts) != len(valueParts) {
return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
fileName, protocol) ErrFileParse, fileName, protocol)
} }
for i := 1; i < len(nameParts); i++ { for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64) value, err := strconv.ParseFloat(valueParts[i], 64)

View File

@ -18,7 +18,6 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
) )
@ -112,7 +111,7 @@ type ProcStat struct {
// Aggregated block I/O delays, measured in clock ticks (centiseconds). // Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64 DelayAcctBlkIOTicks uint64
proc fs.FS proc FS
} }
// NewStat returns the current status information of the process. // NewStat returns the current status information of the process.
@ -139,7 +138,7 @@ func (p Proc) Stat() (ProcStat, error) {
) )
if l < 0 || r < 0 { if l < 0 || r < 0 {
return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
} }
s.Comm = string(data[l+1 : r]) s.Comm = string(data[l+1 : r])
@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds. // StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) { func (s ProcStat) StartTime() (float64, error) {
fs := FS{proc: s.proc} stat, err := s.proc.Stat()
stat, err := fs.Stat()
if err != nil { if err != nil {
return 0, err return 0, err
} }

View File

@ -15,6 +15,7 @@ package procfs
import ( import (
"bytes" "bytes"
"sort"
"strconv" "strconv"
"strings" "strings"
@ -22,7 +23,7 @@ import (
) )
// ProcStatus provides status information about the process, // ProcStatus provides status information about the process,
// read from /proc/[pid]/stat. // read from /proc/[pid]/status.
type ProcStatus struct { type ProcStatus struct {
// The process ID. // The process ID.
PID int PID int
@ -31,6 +32,8 @@ type ProcStatus struct {
// Thread group ID. // Thread group ID.
TGID int TGID int
// List of Pid namespace.
NSpids []uint64
// Peak virtual memory size. // Peak virtual memory size.
VmPeak uint64 // nolint:revive VmPeak uint64 // nolint:revive
@ -76,6 +79,9 @@ type ProcStatus struct {
UIDs [4]string UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs) // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string GIDs [4]string
// CpusAllowedList: List of cpu cores processes are allowed to run on.
CpusAllowedList []uint64
} }
// NewStatus returns the current status information of the process. // NewStatus returns the current status information of the process.
@ -123,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
copy(s.UIDs[:], strings.Split(vString, "\t")) copy(s.UIDs[:], strings.Split(vString, "\t"))
case "Gid": case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t")) copy(s.GIDs[:], strings.Split(vString, "\t"))
case "NSpid":
s.NSpids = calcNSPidsList(vString)
case "VmPeak": case "VmPeak":
s.VmPeak = vUintBytes s.VmPeak = vUintBytes
case "VmSize": case "VmSize":
@ -161,10 +169,53 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.VoluntaryCtxtSwitches = vUint s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches": case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint s.NonVoluntaryCtxtSwitches = vUint
case "Cpus_allowed_list":
s.CpusAllowedList = calcCpusAllowedList(vString)
} }
} }
// TotalCtxtSwitches returns the total context switch. // TotalCtxtSwitches returns the total context switch.
func (s ProcStatus) TotalCtxtSwitches() uint64 { func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
} }
func calcCpusAllowedList(cpuString string) []uint64 {
s := strings.Split(cpuString, ",")
var g []uint64
for _, cpu := range s {
// parse cpu ranges, example: 1-3=[1,2,3]
if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
startCPU, _ := strconv.ParseUint(l[0], 10, 64)
endCPU, _ := strconv.ParseUint(l[1], 10, 64)
for i := startCPU; i <= endCPU; i++ {
g = append(g, i)
}
} else if len(l) == 1 {
cpu, _ := strconv.ParseUint(l[0], 10, 64)
g = append(g, cpu)
}
}
sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
return g
}
func calcNSPidsList(nspidsString string) []uint64 {
s := strings.Split(nspidsString, " ")
var nspids []uint64
for _, nspid := range s {
nspid, _ := strconv.ParseUint(nspid, 10, 64)
if nspid == 0 {
continue
}
nspids = append(nspids, nspid)
}
return nspids
}
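The new `Cpus_allowed_list` handling expands kernel cpu lists such as `0-2,4` into individual ids. A small self-contained sketch of the same range expansion (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// expandCPUList expands a kernel cpu list such as "0-2,4" into [0 1 2 4],
// following the same range handling as calcCpusAllowedList above.
func expandCPUList(s string) []uint64 {
	var cpus []uint64
	for _, tok := range strings.Split(s, ",") {
		parts := strings.Split(strings.TrimSpace(tok), "-")
		if len(parts) > 1 {
			start, _ := strconv.ParseUint(parts[0], 10, 64)
			end, _ := strconv.ParseUint(parts[1], 10, 64)
			for i := start; i <= end; i++ {
				cpus = append(cpus, i)
			}
		} else if len(parts) == 1 {
			cpu, _ := strconv.ParseUint(parts[0], 10, 64)
			cpus = append(cpus, cpu)
		}
	}
	sort.Slice(cpus, func(i, j int) bool { return cpus[i] < cpus[j] })
	return cpus
}

func main() {
	fmt.Println(expandCPUList("0-2,4")) // [0 1 2 4]
}
```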

View File

@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
vp := util.NewValueParser(f) vp := util.NewValueParser(f)
values[i] = vp.Int() values[i] = vp.Int()
if err := vp.Err(); err != nil { if err := vp.Err(); err != nil {
return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
} }
} }
return values, nil return values, nil

View File

@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) {
l := slabSpace.ReplaceAllString(line, " ") l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ") s := strings.Split(l, " ")
if len(s) != 16 { if len(s) != 16 {
return nil, fmt.Errorf("unable to parse: %q", line) return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
} }
var err error var err error
i := &Slab{Name: s[0]} i := &Slab{Name: s[0]}

View File

@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
) )
if !scanner.Scan() { if !scanner.Scan() {
return Softirqs{}, fmt.Errorf("softirqs empty") return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
} }
for scanner.Scan() { for scanner.Scan() {
@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Hi = make([]uint64, len(perCPU)) softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "TIMER:": case parts[0] == "TIMER:":
@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Timer = make([]uint64, len(perCPU)) softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "NET_TX:": case parts[0] == "NET_TX:":
@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetTx = make([]uint64, len(perCPU)) softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "NET_RX:": case parts[0] == "NET_RX:":
@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetRx = make([]uint64, len(perCPU)) softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "BLOCK:": case parts[0] == "BLOCK:":
@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Block = make([]uint64, len(perCPU)) softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "IRQ_POLL:": case parts[0] == "IRQ_POLL:":
@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.IRQPoll = make([]uint64, len(perCPU)) softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "TASKLET:": case parts[0] == "TASKLET:":
@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Tasklet = make([]uint64, len(perCPU)) softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "SCHED:": case parts[0] == "SCHED:":
@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Sched = make([]uint64, len(perCPU)) softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "HRTIMER:": case parts[0] == "HRTIMER:":
@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.HRTimer = make([]uint64, len(perCPU)) softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "RCU:": case parts[0] == "RCU:":
@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.RCU = make([]uint64, len(perCPU)) softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU { for i, count := range perCPU {
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
} }
} }
} }
} }
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
} }
return softirqs, scanner.Err() return softirqs, scanner.Err()

View File

@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice) &cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
} }
if count == 0 { if count == 0 {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
} }
cpuStat.User /= userHZ cpuStat.User /= userHZ
@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil { if err != nil {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
} }
return cpuStat, cpuID, nil return cpuStat, cpuID, nil
@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu) &softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil { if err != nil {
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
} }
return softIRQStat, total, nil return softIRQStat, total, nil
@ -187,6 +187,10 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
err error err error
) )
// Increase default scanner buffer to handle very long `intr` lines.
buf := make([]byte, 0, 8*1024)
scanner.Buffer(buf, 1024*1024)
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
parts := strings.Fields(scanner.Text()) parts := strings.Fields(scanner.Text())
@ -197,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
switch { switch {
case parts[0] == "btime": case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
} }
case parts[0] == "intr": case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
} }
numberedIRQs := parts[2:] numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs)) stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs { for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
} }
} }
case parts[0] == "ctxt": case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
} }
case parts[0] == "processes": case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
} }
case parts[0] == "procs_running": case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
} }
case parts[0] == "procs_blocked": case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
} }
case parts[0] == "softirq": case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line) softIRQStats, total, err := parseSoftIRQStat(line)
@ -247,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
} }
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
} }
return stat, nil return stat, nil
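The scanner-buffer change above is needed because `bufio.Scanner` caps tokens at 64 KiB by default (`bufio.MaxScanTokenSize`), and the `intr` line in /proc/stat can exceed that on systems with many interrupt sources. A minimal sketch of the pattern:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for /proc/stat content containing a very long "intr" line.
	r := strings.NewReader("intr " + strings.Repeat("0 ", 100000) + "\n")

	scanner := bufio.NewScanner(r)
	// Start with 8 KiB but allow tokens up to 1 MiB, mirroring parseStat
	// above. Without this, lines longer than bufio.MaxScanTokenSize
	// (64 KiB) fail with bufio.ErrTooLong.
	buf := make([]byte, 0, 8*1024)
	scanner.Buffer(buf, 1024*1024)

	for scanner.Scan() {
		fmt.Println("line length:", len(scanner.Text()))
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}
```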

View File

@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) {
swapFields := strings.Fields(swapString) swapFields := strings.Fields(swapString)
swapLength := len(swapFields) swapLength := len(swapFields)
if swapLength < 5 { if swapLength < 5 {
return nil, fmt.Errorf("too few fields in swap string: %s", swapString) return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
} }
swap := &Swap{ swap := &Swap{
@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
swap.Size, err = strconv.Atoi(swapFields[2]) swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
} }
swap.Used, err = strconv.Atoi(swapFields[3]) swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
} }
swap.Priority, err = strconv.Atoi(swapFields[4]) swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
} }
return swap, nil return swap, nil

View File

@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
names, err := d.Readdirnames(-1) names, err := d.Readdirnames(-1)
if err != nil { if err != nil {
return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
} }
t := Procs{} t := Procs{}
@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
if err != nil { if err != nil {
continue continue
} }
t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
} }
return t, nil return t, nil
@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
if _, err := os.Stat(taskPath); err != nil { if _, err := os.Stat(taskPath); err != nil {
return Proc{}, err return Proc{}, err
} }
return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
} }
// Thread returns a process for a given TID of Proc. // Thread returns a process for a given TID of Proc.
func (proc Proc) Thread(tid int) (Proc, error) { func (proc Proc) Thread(tid int) (Proc, error) {
tfs := fsi.FS(proc.path("task")) tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
return Proc{}, err return Proc{}, err
} }
return Proc{PID: tid, fs: tfs}, nil return Proc{PID: tid, fs: tfs}, nil

View File

@ -86,7 +86,7 @@ func (fs FS) VM() (*VM, error) {
return nil, err return nil, err
} }
if !file.Mode().IsDir() { if !file.Mode().IsDir() {
return nil, fmt.Errorf("%s is not a directory", path) return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
} }
files, err := os.ReadDir(path) files, err := os.ReadDir(path)

View File

@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) { func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := os.ReadFile(fs.proc.Path("zoneinfo")) data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
} }
zoneinfo, err := parseZoneinfo(data) zoneinfo, err := parseZoneinfo(data)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
} }
return zoneinfo, nil return zoneinfo, nil
} }

View File

@ -34,7 +34,7 @@ const (
RequestCount = "http.server.request_count" // Incoming request count total RequestCount = "http.server.request_count" // Incoming request count total
RequestContentLength = "http.server.request_content_length" // Incoming request bytes total RequestContentLength = "http.server.request_content_length" // Incoming request bytes total
ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total
ServerLatency = "http.server.duration" // Incoming end to end duration, microseconds ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds
) )
// Filter is a predicate used to determine whether a given http.request should // Filter is a predicate used to determine whether a given http.request should
@ -42,5 +42,5 @@ const (
type Filter func(*http.Request) bool type Filter func(*http.Request) bool
func newTracer(tp trace.TracerProvider) trace.Tracer { func newTracer(tp trace.TracerProvider) trace.Tracer {
return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version())) return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version()))
} }

View File

@ -25,9 +25,8 @@ import (
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
) )
const ( // ScopeName is the instrumentation scope name.
instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// config represents the configuration options available for the http.Handler // config represents the configuration options available for the http.Handler
// and http.Transport types. // and http.Transport types.
@ -76,7 +75,7 @@ func newConfig(opts ...Option) *config {
} }
c.Meter = c.MeterProvider.Meter( c.Meter = c.MeterProvider.Meter(
instrumentationName, ScopeName,
metric.WithInstrumentationVersion(Version()), metric.WithInstrumentationVersion(Version()),
) )

View File

@ -107,13 +107,25 @@ func (h *middleware) createMeasures() {
h.counters = make(map[string]metric.Int64Counter) h.counters = make(map[string]metric.Int64Counter)
h.valueRecorders = make(map[string]metric.Float64Histogram) h.valueRecorders = make(map[string]metric.Float64Histogram)
requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) requestBytesCounter, err := h.meter.Int64Counter(
RequestContentLength,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"),
)
handleErr(err) handleErr(err)
responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) responseBytesCounter, err := h.meter.Int64Counter(
ResponseContentLength,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"),
)
handleErr(err) handleErr(err)
serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) serverLatencyMeasure, err := h.meter.Float64Histogram(
ServerLatency,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of HTTP request handling"),
)
handleErr(err) handleErr(err)
h.counters[RequestContentLength] = requestBytesCounter h.counters[RequestContentLength] = requestBytesCounter
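Attaching a UCUM unit and a description at instrument creation, as above, is plain OpenTelemetry metric API usage. A hedged sketch against the no-op meter (instrument name copied from the constants above):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// A no-op meter is enough to show the option plumbing.
	meter := noop.NewMeterProvider().Meter("example")

	// "By" is the UCUM code for bytes, matching the unit used above.
	counter, err := meter.Int64Counter(
		"http.server.request_content_length",
		metric.WithUnit("By"),
		metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"),
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	counter.Add(context.Background(), 1024)
}
```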

View File

@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation. // Version is the current release version of the otelhttp instrumentation.
func Version() string { func Version() string {
return "0.45.0" return "0.46.1"
// This string is updated by the pre_release.sh script during release // This string is updated by the pre_release.sh script during release
} }

View File

@ -14,12 +14,9 @@ go.work.sum
gen/ gen/
/example/dice/dice /example/dice/dice
/example/fib/fib
/example/fib/traces.txt
/example/jaeger/jaeger
/example/namedtracer/namedtracer /example/namedtracer/namedtracer
/example/otel-collector/otel-collector
/example/opencensus/opencensus /example/opencensus/opencensus
/example/passthrough/passthrough /example/passthrough/passthrough
/example/prometheus/prometheus /example/prometheus/prometheus
/example/zipkin/zipkin /example/zipkin/zipkin
/example/otel-collector/otel-collector

View File

@ -12,8 +12,9 @@ linters:
- depguard - depguard
- errcheck - errcheck
- godot - godot
- gofmt - gofumpt
- goimports - goimports
- gosec
- gosimple - gosimple
- govet - govet
- ineffassign - ineffassign
@ -53,6 +54,20 @@ issues:
text: "calls to (.+) only in main[(][)] or init[(][)] functions" text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters: linters:
- revive - revive
# It's okay to not run gosec in a test.
- path: _test\.go
linters:
- gosec
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- text: "G404:"
linters:
- gosec
# Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- text: "G402: TLS MinVersion too low."
linters:
- gosec
include: include:
# revive exported should have comment or be unexported. # revive exported should have comment or be unexported.
- EXC0012 - EXC0012

View File

@ -8,6 +8,85 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased] ## [Unreleased]
## [1.21.0/0.44.0] 2023-11-16
### Removed
- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
### Fixed
- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
## [1.20.0/0.43.0] 2023-11-10
This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
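For illustration only, a custom implementation under the new scheme embeds the corresponding `embedded` type so it keeps compiling as the interface grows; the delegation to the no-op tracer below is a placeholder, not the recommended default:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// myTracerProvider stays compatible with future trace API additions because
// it embeds embedded.TracerProvider; methods it does not override are not
// implemented (calling them panics on the nil embedded interface).
type myTracerProvider struct {
	embedded.TracerProvider
}

func (p myTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	// Placeholder: delegate to the no-op tracer.
	return noop.NewTracerProvider().Tracer(name, opts...)
}

func main() {
	var tp trace.TracerProvider = myTracerProvider{}
	_, span := tp.Tracer("example").Start(context.Background(), "op")
	span.End()
}
```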
### Added
- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
### Changed
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
This extends the `Tracer` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
This extends the `Span` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
### Fixed
- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as whitespace. (#4699)
- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
## [1.19.0/0.42.0/0.0.7] 2023-09-28 ## [1.19.0/0.42.0/0.0.7] 2023-09-28
This release contains the first stable release of the OpenTelemetry Go [metric SDK]. This release contains the first stable release of the OpenTelemetry Go [metric SDK].
@ -2656,7 +2735,9 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files. - CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project. - CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 [1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 [1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
@ -2731,7 +2812,7 @@ It contains api and sdk for trace and meter.
[Go 1.20]: https://go.dev/doc/go1.20 [Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19 [Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18 [Go 1.18]: https://go.dev/doc/go1.18
[Go 1.19]: https://go.dev/doc/go1.19
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace

View File

@ -90,6 +90,10 @@ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`. request ID to the entry you added to `CHANGELOG.md`.
Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
Rewriting Git history makes it difficult to keep track of iterations during code review.
All pull requests are squashed to a single commit upon merge to `main`.
### How to Receive Comments ### How to Receive Comments
* If the PR is not ready for review, please put `[WIP]` in the title, * If the PR is not ready for review, please put `[WIP]` in the title,

View File

@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
GORELEASE = $(TOOLS)/gorelease GORELEASE = $(TOOLS)/gorelease
$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools .PHONY: tools
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE)
done; \ done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
# Adding a directory will include all benchmarks in that directory if a filter is not specified.
BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
BENCHMARK_FILTER = .
# You can override the filter for a particular directory by adding a rule here.
benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
benchmark/%:
@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
&& cd $* \
$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
.PHONY: golangci-lint golangci-lint-fix .PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint golangci-lint-fix: golangci-lint
@ -216,7 +231,7 @@ go-mod-tidy/%: | crosslink
lint-modules: go-mod-tidy lint-modules: go-mod-tidy
.PHONY: lint .PHONY: lint
lint: misspell lint-modules golangci-lint lint: misspell lint-modules golangci-lint govulncheck
.PHONY: vanity-import-check .PHONY: vanity-import-check
vanity-import-check: | $(PORTO) vanity-import-check: | $(PORTO)
@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO)
misspell: | $(MISSPELL) misspell: | $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS) @$(MISSPELL) -w $(ALL_DOCS)
.PHONY: govulncheck
govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
govulncheck/%: DIR=$*
govulncheck/%: | $(GOVULNCHECK)
@echo "govulncheck ./... in $(DIR)" \
&& cd $(DIR) \
&& $(GOVULNCHECK) ./...
.PHONY: codespell .PHONY: codespell
codespell: | $(CODESPELL) codespell: | $(CODESPELL)
@$(DOCKERPY) $(CODESPELL) @$(DOCKERPY) $(CODESPELL)
@ -289,3 +312,7 @@ COMMIT ?= "HEAD"
add-tags: | $(MULTIMOD) add-tags: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
.PHONY: lint-markdown
lint-markdown:
docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md

View File

@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status ## Project Status
| Signal | Status | Project | | Signal | Status |
|---------|------------|-----------------------| |---------|------------|
| Traces | Stable | N/A | | Traces | Stable |
| Metrics | Mixed [1] | [Go: Metric SDK (GA)] | | Metrics | Stable |
| Logs | Frozen [2] | N/A | | Logs | Design [1] |
[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34 - [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta.
- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK.
No Logs Pull Requests are currently being accepted. No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our Progress and status specific to this repository is tracked in our

View File

@ -254,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil { if err := m.validate(); err != nil {
return newInvalidMember(), err return newInvalidMember(), err
} }
decodedValue, err := url.QueryUnescape(value) decodedValue, err := url.PathUnescape(value)
if err != nil { if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
} }
@ -301,7 +301,7 @@ func parseMember(member string) (Member, error) {
// when converting the header into a data structure." // when converting the header into a data structure."
key = strings.TrimSpace(k) key = strings.TrimSpace(k)
var err error var err error
value, err = url.QueryUnescape(strings.TrimSpace(v)) value, err = url.PathUnescape(strings.TrimSpace(v))
if err != nil { if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value) return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
} }
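The practical effect of switching from `url.QueryUnescape` to `url.PathUnescape` above: query unescaping decodes `+` as a space, while path unescaping leaves it literal, so baggage values containing `+` now round-trip. A quick demonstration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q, _ := url.QueryUnescape("a+b%2Fc") // query rules: '+' decodes to ' '
	p, _ := url.PathUnescape("a+b%2Fc")  // path rules: '+' stays literal
	fmt.Printf("QueryUnescape: %q\n", q) // "a b/c"
	fmt.Printf("PathUnescape:  %q\n", p) // "a+b/c"
}
```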

View File

@ -34,11 +34,13 @@ type afCounter struct {
name string name string
opts []metric.Float64ObservableCounterOption opts []metric.Float64ObservableCounterOption
delegate atomic.Value //metric.Float64ObservableCounter delegate atomic.Value // metric.Float64ObservableCounter
} }
var _ unwrapper = (*afCounter)(nil) var (
var _ metric.Float64ObservableCounter = (*afCounter)(nil) _ unwrapper = (*afCounter)(nil)
_ metric.Float64ObservableCounter = (*afCounter)(nil)
)
func (i *afCounter) setDelegate(m metric.Meter) { func (i *afCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableCounter(i.name, i.opts...) ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
@ -63,11 +65,13 @@ type afUpDownCounter struct {
name string name string
opts []metric.Float64ObservableUpDownCounterOption opts []metric.Float64ObservableUpDownCounterOption
delegate atomic.Value //metric.Float64ObservableUpDownCounter delegate atomic.Value // metric.Float64ObservableUpDownCounter
} }
var _ unwrapper = (*afUpDownCounter)(nil) var (
var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) _ unwrapper = (*afUpDownCounter)(nil)
_ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
)
func (i *afUpDownCounter) setDelegate(m metric.Meter) { func (i *afUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
@ -92,11 +96,13 @@ type afGauge struct {
name string name string
opts []metric.Float64ObservableGaugeOption opts []metric.Float64ObservableGaugeOption
delegate atomic.Value //metric.Float64ObservableGauge delegate atomic.Value // metric.Float64ObservableGauge
} }
var _ unwrapper = (*afGauge)(nil) var (
var _ metric.Float64ObservableGauge = (*afGauge)(nil) _ unwrapper = (*afGauge)(nil)
_ metric.Float64ObservableGauge = (*afGauge)(nil)
)
func (i *afGauge) setDelegate(m metric.Meter) { func (i *afGauge) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableGauge(i.name, i.opts...) ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
@ -121,11 +127,13 @@ type aiCounter struct {
name string name string
opts []metric.Int64ObservableCounterOption opts []metric.Int64ObservableCounterOption
delegate atomic.Value //metric.Int64ObservableCounter delegate atomic.Value // metric.Int64ObservableCounter
} }
var _ unwrapper = (*aiCounter)(nil) var (
var _ metric.Int64ObservableCounter = (*aiCounter)(nil) _ unwrapper = (*aiCounter)(nil)
_ metric.Int64ObservableCounter = (*aiCounter)(nil)
)
func (i *aiCounter) setDelegate(m metric.Meter) { func (i *aiCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableCounter(i.name, i.opts...) ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
@ -150,11 +158,13 @@ type aiUpDownCounter struct {
name string name string
opts []metric.Int64ObservableUpDownCounterOption opts []metric.Int64ObservableUpDownCounterOption
delegate atomic.Value //metric.Int64ObservableUpDownCounter delegate atomic.Value // metric.Int64ObservableUpDownCounter
} }
var _ unwrapper = (*aiUpDownCounter)(nil) var (
var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) _ unwrapper = (*aiUpDownCounter)(nil)
_ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
)
func (i *aiUpDownCounter) setDelegate(m metric.Meter) { func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
@ -179,11 +189,13 @@ type aiGauge struct {
name string name string
opts []metric.Int64ObservableGaugeOption opts []metric.Int64ObservableGaugeOption
delegate atomic.Value //metric.Int64ObservableGauge delegate atomic.Value // metric.Int64ObservableGauge
} }
var _ unwrapper = (*aiGauge)(nil) var (
var _ metric.Int64ObservableGauge = (*aiGauge)(nil) _ unwrapper = (*aiGauge)(nil)
_ metric.Int64ObservableGauge = (*aiGauge)(nil)
)
func (i *aiGauge) setDelegate(m metric.Meter) { func (i *aiGauge) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableGauge(i.name, i.opts...) ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
@ -208,7 +220,7 @@ type sfCounter struct {
name string name string
opts []metric.Float64CounterOption opts []metric.Float64CounterOption
delegate atomic.Value //metric.Float64Counter delegate atomic.Value // metric.Float64Counter
} }
var _ metric.Float64Counter = (*sfCounter)(nil) var _ metric.Float64Counter = (*sfCounter)(nil)
@ -234,7 +246,7 @@ type sfUpDownCounter struct {
name string name string
opts []metric.Float64UpDownCounterOption opts []metric.Float64UpDownCounterOption
delegate atomic.Value //metric.Float64UpDownCounter delegate atomic.Value // metric.Float64UpDownCounter
} }
var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
@ -260,7 +272,7 @@ type sfHistogram struct {
name string name string
opts []metric.Float64HistogramOption opts []metric.Float64HistogramOption
delegate atomic.Value //metric.Float64Histogram delegate atomic.Value // metric.Float64Histogram
} }
var _ metric.Float64Histogram = (*sfHistogram)(nil) var _ metric.Float64Histogram = (*sfHistogram)(nil)
@ -286,7 +298,7 @@ type siCounter struct {
name string name string
opts []metric.Int64CounterOption opts []metric.Int64CounterOption
delegate atomic.Value //metric.Int64Counter delegate atomic.Value // metric.Int64Counter
} }
var _ metric.Int64Counter = (*siCounter)(nil) var _ metric.Int64Counter = (*siCounter)(nil)
@ -312,7 +324,7 @@ type siUpDownCounter struct {
name string name string
opts []metric.Int64UpDownCounterOption opts []metric.Int64UpDownCounterOption
delegate atomic.Value //metric.Int64UpDownCounter delegate atomic.Value // metric.Int64UpDownCounter
} }
var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
@ -338,7 +350,7 @@ type siHistogram struct {
name string name string
opts []metric.Int64HistogramOption opts []metric.Int64HistogramOption
delegate atomic.Value //metric.Int64Histogram delegate atomic.Value // metric.Int64Histogram
} }
var _ metric.Int64Histogram = (*siHistogram)(nil) var _ metric.Int64Histogram = (*siHistogram)(nil)

View File

@ -39,6 +39,7 @@ import (
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
) )
// tracerProvider is a placeholder for a configured SDK TracerProvider. // tracerProvider is a placeholder for a configured SDK TracerProvider.
@ -46,6 +47,8 @@ import (
// All TracerProvider functionality is forwarded to a delegate once // All TracerProvider functionality is forwarded to a delegate once
// configured. // configured.
type tracerProvider struct { type tracerProvider struct {
embedded.TracerProvider
mtx sync.Mutex mtx sync.Mutex
tracers map[il]*tracer tracers map[il]*tracer
delegate trace.TracerProvider delegate trace.TracerProvider
@ -119,6 +122,8 @@ type il struct {
// All Tracer functionality is forwarded to a delegate once configured. // All Tracer functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopTracer. // Otherwise, all functionality is forwarded to a NoopTracer.
type tracer struct { type tracer struct {
embedded.Tracer
name string name string
opts []trace.TracerOption opts []trace.TracerOption
provider *tracerProvider provider *tracerProvider
@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
// SpanContext. It performs no operations other than to return the wrapped // SpanContext. It performs no operations other than to return the wrapped
// SpanContext. // SpanContext.
type nonRecordingSpan struct { type nonRecordingSpan struct {
embedded.Span
sc trace.SpanContext sc trace.SpanContext
tracer *tracer tracer *tracer
} }

View File

@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric].
Finally, an author can embed another implementation in theirs. The embedded Finally, an author can embed another implementation in theirs. The embedded
implementation will be used for methods not defined by the author. For example, implementation will be used for methods not defined by the author. For example,
an author who want to default to silently dropping the call can use an author who wants to default to silently dropping the call can use
[go.opentelemetry.io/otel/metric/noop]: [go.opentelemetry.io/otel/metric/noop]:
import "go.opentelemetry.io/otel/metric/noop" import "go.opentelemetry.io/otel/metric/noop"

View File

@ -39,6 +39,12 @@ type InstrumentOption interface {
Float64ObservableGaugeOption Float64ObservableGaugeOption
} }
// HistogramOption applies options to histogram instruments.
type HistogramOption interface {
Int64HistogramOption
Float64HistogramOption
}
type descOpt string type descOpt string
func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob
// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. // The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
func WithUnit(u string) InstrumentOption { return unitOpt(u) } func WithUnit(u string) InstrumentOption { return unitOpt(u) }
// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
//
// This option is considered "advisory", and may be ignored by API implementations.
func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
type bucketOpt []float64
func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
c.explicitBucketBoundaries = o
return c
}
func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
c.explicitBucketBoundaries = o
return c
}
// AddOption applies options to an addition measurement. See // AddOption applies options to an addition measurement. See
// [MeasurementOption] for other options that can be used as an AddOption. // [MeasurementOption] for other options that can be used as an AddOption.
type AddOption interface { type AddOption interface {

View File

@ -147,8 +147,9 @@ type Float64Histogram interface {
// Float64HistogramConfig contains options for synchronous histogram instruments // Float64HistogramConfig contains options for synchronous histogram instruments
// that record float64 values. // that record float64 values.
type Float64HistogramConfig struct { type Float64HistogramConfig struct {
description string description string
unit string unit string
explicitBucketBoundaries []float64
} }
// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all // NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string {
return c.unit return c.unit
} }
// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
return c.explicitBucketBoundaries
}
// Float64HistogramOption applies options to a [Float64HistogramConfig]. See // Float64HistogramOption applies options to a [Float64HistogramConfig]. See
// [InstrumentOption] for other options that can be used as a // [InstrumentOption] for other options that can be used as a
// Float64HistogramOption. // Float64HistogramOption.
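Downstream, a caller passes the new advisory option when creating a histogram; an SDK may honor or ignore it. A hedged sketch using the no-op meter (instrument name and boundaries are illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	meter := noop.NewMeterProvider().Meter("example")

	// Advisory boundaries: an SDK may adopt these as the default histogram
	// aggregation buckets, or ignore them entirely.
	hist, err := meter.Float64Histogram(
		"request.duration",
		metric.WithUnit("ms"),
		metric.WithExplicitBucketBoundaries(5, 10, 25, 50, 100, 250),
	)
	if err != nil {
		panic(err)
	}
	hist.Record(context.Background(), 42)
}
```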

Some files were not shown because too many files have changed in this diff Show More