From f3c77df31e88493536040f1e6deabf2f4288bdb6 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 26 Feb 2024 17:45:07 +0100 Subject: [PATCH] vendor: github.com/prometheus/client_golang v1.17.0 full diffs: - https://github.com/prometheus/client_golang/compare/v1.14.0...v1.17.0 - https://github.com/prometheus/client_model/compare/v0.3.0...v0.5.0 - https://github.com/prometheus/common/compare/v0.42.0...v0.44.0 - https://github.com/prometheus/procfs/compare/v0.9.0...v0.12.0 Signed-off-by: Sebastiaan van Stijn --- vendor.mod | 8 +- vendor.sum | 20 +- .../client_golang/prometheus/counter.go | 48 +- .../client_golang/prometheus/desc.go | 58 +- .../client_golang/prometheus/doc.go | 44 +- .../prometheus/expvar_collector.go | 2 +- .../client_golang/prometheus/gauge.go | 30 +- .../prometheus/go_collector_latest.go | 7 +- .../client_golang/prometheus/histogram.go | 169 +- .../prometheus/internal/difflib.go | 2 +- .../client_golang/prometheus/labels.go | 98 + .../client_golang/prometheus/metric.go | 9 +- .../client_golang/prometheus/promhttp/http.go | 19 +- .../prometheus/promhttp/instrument_client.go | 26 +- .../prometheus/promhttp/instrument_server.go | 112 +- .../prometheus/promhttp/option.go | 38 +- .../client_golang/prometheus/registry.go | 21 +- .../client_golang/prometheus/summary.go | 72 +- .../client_golang/prometheus/timer.go | 28 +- .../client_golang/prometheus/value.go | 61 +- .../client_golang/prometheus/vec.go | 95 +- .../client_golang/prometheus/vnext.go | 23 + .../client_golang/prometheus/wrap.go | 8 +- .../prometheus/client_model/go/metrics.pb.go | 1630 +++++++++++------ .../prometheus/common/expfmt/decode.go | 5 +- .../prometheus/common/expfmt/encode.go | 13 +- .../prometheus/common/expfmt/expfmt.go | 26 +- .../prometheus/common/expfmt/text_parse.go | 2 +- .../prometheus/procfs/.golangci.yml | 3 + .../prometheus/procfs/Makefile.common | 22 +- vendor/github.com/prometheus/procfs/README.md | 4 +- vendor/github.com/prometheus/procfs/arp.go | 6 +- .../github.com/prometheus/procfs/buddyinfo.go | 6 +- .../github.com/prometheus/procfs/cpuinfo.go | 17 +- vendor/github.com/prometheus/procfs/crypto.go | 7 +- vendor/github.com/prometheus/procfs/fs.go | 11 +- .../prometheus/procfs/fs_statfs_notype.go | 23 + .../prometheus/procfs/fs_statfs_type.go | 33 + .../github.com/prometheus/procfs/fscache.go | 6 +- .../prometheus/procfs/internal/util/parse.go | 15 + vendor/github.com/prometheus/procfs/ipvs.go | 7 +- .../github.com/prometheus/procfs/loadavg.go | 4 +- vendor/github.com/prometheus/procfs/mdstat.go | 36 +- .../github.com/prometheus/procfs/meminfo.go | 4 +- .../github.com/prometheus/procfs/mountinfo.go | 10 +- .../prometheus/procfs/mountstats.go | 117 +- .../prometheus/procfs/net_conntrackstat.go | 91 +- .../prometheus/procfs/net_ip_socket.go | 32 +- .../prometheus/procfs/net_protocols.go | 4 +- .../github.com/prometheus/procfs/net_route.go | 143 ++ .../prometheus/procfs/net_sockstat.go | 9 +- .../prometheus/procfs/net_softnet.go | 9 +- .../github.com/prometheus/procfs/net_unix.go | 16 +- .../prometheus/procfs/net_wireless.go | 182 ++ .../github.com/prometheus/procfs/net_xfrm.go | 2 +- .../github.com/prometheus/procfs/netstat.go | 25 +- vendor/github.com/prometheus/procfs/proc.go | 37 +- .../prometheus/procfs/proc_cgroup.go | 4 +- .../prometheus/procfs/proc_cgroups.go | 8 +- .../prometheus/procfs/proc_fdinfo.go | 10 +- .../prometheus/procfs/proc_interrupts.go | 2 +- .../prometheus/procfs/proc_limits.go | 4 +- .../github.com/prometheus/procfs/proc_maps.go | 24 +- 
.../prometheus/procfs/proc_netstat.go | 4 +- .../github.com/prometheus/procfs/proc_ns.go | 6 +- .../github.com/prometheus/procfs/proc_psi.go | 6 +- .../prometheus/procfs/proc_smaps.go | 4 +- .../github.com/prometheus/procfs/proc_snmp.go | 4 +- .../github.com/prometheus/procfs/proc_stat.go | 8 +- .../prometheus/procfs/proc_status.go | 53 +- .../github.com/prometheus/procfs/proc_sys.go | 2 +- vendor/github.com/prometheus/procfs/slab.go | 2 +- .../github.com/prometheus/procfs/softirqs.go | 24 +- vendor/github.com/prometheus/procfs/stat.go | 28 +- vendor/github.com/prometheus/procfs/swaps.go | 8 +- vendor/github.com/prometheus/procfs/thread.go | 11 +- vendor/github.com/prometheus/procfs/vm.go | 2 +- .../github.com/prometheus/procfs/zoneinfo.go | 4 +- vendor/modules.txt | 14 +- 79 files changed, 2620 insertions(+), 1167 deletions(-) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vnext.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_notype.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_type.go create mode 100644 vendor/github.com/prometheus/procfs/net_route.go create mode 100644 vendor/github.com/prometheus/procfs/net_wireless.go diff --git a/vendor.mod b/vendor.mod index c1868dd74b..ad660ef5ed 100644 --- a/vendor.mod +++ b/vendor.mod @@ -68,10 +68,10 @@ require ( github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/symlink v0.2.0 // indirect github.com/moby/sys/user v0.1.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect diff --git a/vendor.sum b/vendor.sum index e508faa317..44cd25d116 100644 --- a/vendor.sum +++ b/vendor.sum @@ -102,7 +102,6 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= @@ -143,8 +142,8 @@ github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty 
v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -209,26 +208,27 @@ github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 
v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index a912b75a05..4ce84e7a80 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -20,6 +20,7 @@ import ( "time" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/types/known/timestamppb" ) // Counter is a Metric that represents a single numerical value that only ever @@ -59,6 +60,18 @@ type ExemplarAdder interface { // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts +// CounterVecOpts bundles the options to create a CounterVec metric. +// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type CounterVecOpts struct { + CounterOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewCounter creates a new Counter based on the provided CounterOpts. // // The returned implementation also implements ExemplarAdder. It is safe to @@ -78,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + if opts.now == nil { + opts.now = time.Now + } + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result } @@ -94,10 +111,12 @@ type counter struct { selfCollector desc *Desc + createdTs *timestamppb.Timestamp labelPairs []*dto.LabelPair exemplar atomic.Value // Containing nil or a *dto.Exemplar. - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. + now func() time.Time } func (c *counter) Desc() *Desc { @@ -147,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error { exemplar = e.(*dto.Exemplar) } val := c.get() - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) } func (c *counter) updateExemplar(v float64, l Labels) { @@ -174,19 +192,31 @@ type CounterVec struct { // NewCounterVec creates a new CounterVec based on the provided CounterOpts and // partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( + return V2.NewCounterVec(CounterVecOpts{ + CounterOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. 
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) + if opts.now == nil { + opts.now = time.Now + } return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result }), } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 8bc5e44e2f..68ffe3c248 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -14,20 +14,16 @@ package prometheus import ( - "errors" "fmt" "sort" "strings" "github.com/cespare/xxhash/v2" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" "github.com/prometheus/client_golang/prometheus/internal" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially @@ -54,9 +50,9 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string + // variableLabels contains names of labels and normalization function for + // which the metric maintains variable values. + variableLabels *compiledLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -80,10 +76,24 @@ type Desc struct { // For constLabels, the label values are constant. Therefore, they are fully // specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names and normalization functions. Their +// label values are variable and therefore not part of the Desc. (They are managed +// within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. 
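Illustrative aside (not part of the vendored diff, which continues below): a minimal sketch of how code built against this vendored client_golang v1.17.0 might use the new CounterVecOpts / ConstrainedLabels API added above. The metric name and the strings.ToLower constraint are hypothetical.

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// V2.NewCounterVec takes CounterVecOpts; each ConstrainedLabel may carry a
	// Constraint func(string) string that normalizes incoming label values.
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "code"},
			{Name: "method", Constraint: strings.ToLower}, // e.g. "GET" is normalized to "get"
		},
	})
	prometheus.MustRegister(requests)
	requests.WithLabelValues("200", "GET").Inc()
}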
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels, + variableLabels: variableLabels.compile(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + for _, label := range d.variableLabels.names { + if !checkLabelName(label) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) return d } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} + labelNames = append(labelNames, "$"+label) + labelNameSet[label] = struct{}{} } if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") + d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) return d } @@ -179,11 +189,19 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } + vlStrings := make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } + } return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", d.fqName, d.help, strings.Join(lpStrings, ","), - d.variableLabels, + strings.Join(vlStrings, ","), ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 811072cbd5..962608f02c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -37,35 +37,35 @@ // // type metrics struct { // cpuTemp prometheus.Gauge -// hdFailures *prometheus.CounterVec +// hdFailures *prometheus.CounterVec // } // // func NewMetrics(reg prometheus.Registerer) *metrics { -// m := &metrics{ -// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }), -// hdFailures: prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ), -// } -// reg.MustRegister(m.cpuTemp) -// reg.MustRegister(m.hdFailures) -// return m +// m := &metrics{ +// cpuTemp: 
prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m // } // // func main() { -// // Create a non-global registry. -// reg := prometheus.NewRegistry() +// // Create a non-global registry. +// reg := prometheus.NewRegistry() // -// // Create new metrics and register them using the custom registry. -// m := NewMetrics(reg) -// // Set values for the new created metrics. +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. // m.cpuTemp.Set(65.3) // m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index c41ab37f3b..de5a856293 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) { continue } var v interface{} - labels := make([]string, len(desc.variableLabels)) + labels := make([]string, len(desc.variableLabels.names)) if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { ch <- NewInvalidMetric(desc, err) continue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 21271a5bb4..dd2eac9406 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -55,6 +55,18 @@ type Gauge interface { // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts +// GaugeVecOpts bundles the options to create a GaugeVec metric. +// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type GaugeVecOpts struct { + GaugeOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewGauge creates a new Gauge based on the provided GaugeOpts. // // The returned implementation is optimized for a fast Set method. If you have a @@ -123,7 +135,7 @@ func (g *gauge) Sub(val float64) { func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -138,16 +150,24 @@ type GaugeVec struct { // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( + return V2.NewGaugeVec(GaugeVecOpts{ + GaugeOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. 
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 3a2d55e84b..2d8d9f64f4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -23,11 +23,10 @@ import ( "strings" "sync" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/client_golang/prometheus/internal" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) const ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 4c873a01c3..1feba62c6c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,10 +22,10 @@ import ( "sync/atomic" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -392,7 +392,7 @@ type HistogramOpts struct { // zero, it is replaced by default buckets. The default buckets are // DefBuckets if no buckets for a native histogram (see below) are used, // otherwise the default is no buckets. (In other words, if you want to - // use both reguler buckets and buckets for a native histogram, you have + // use both regular buckets and buckets for a native histogram, you have // to define the regular buckets here explicitly.) Buckets []float64 @@ -402,7 +402,7 @@ type HistogramOpts struct { // Histogram by a Prometheus server with that feature enabled (requires // Prometheus v2.40+). Sparse buckets are exponential buckets covering // the whole float64 range (with the exception of the “zero” bucket, see - // SparseBucketsZeroThreshold below). From any one bucket to the next, + // NativeHistogramZeroThreshold below). From any one bucket to the next, // the width of the bucket grows by a constant // factor. NativeHistogramBucketFactor provides an upper bound for this // factor (exception see below). The smaller @@ -414,8 +414,8 @@ type HistogramOpts struct { // and 2, same as between 2 and 4, and 4 and 8, etc.). // // Details about the actually used factor: The factor is calculated as - // 2^(2^n), where n is an integer number between (and including) -8 and - // 4. 
n is chosen so that the resulting factor is the largest that is + // 2^(2^-n), where n is an integer number between (and including) -4 and + // 8. n is chosen so that the resulting factor is the largest that is // still smaller or equal to NativeHistogramBucketFactor. Note that the // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) // ). If NativeHistogramBucketFactor is greater than 1 but smaller than @@ -429,12 +429,12 @@ type HistogramOpts struct { // a major version bump. NativeHistogramBucketFactor float64 // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” - // bucket. For best results, this should be close to a bucket - // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. + // For best results, this should be close to a bucket boundary. This is + // usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefSparseBucketsZeroThreshold is used as the threshold. To configure - // a zero bucket with an actual threshold of zero (i.e. only + // DefNativeHistogramZeroThreshold is used as the threshold. To + // configure a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero // constant (or any negative float value). @@ -447,26 +447,46 @@ type HistogramOpts struct { // Histogram are sufficiently wide-spread. In particular, this could be // used as a DoS attack vector. Where the observed values depend on // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber.) Once the set // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: First, if the last reset (or the creation) of the histogram - // is at least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). If less time has passed, or if - // NativeHistogramMinResetDuration is zero, no reset is - // performed. Instead, the zero threshold is increased sufficiently to - // reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. After that, if the - // number of buckets still exceeds NativeHistogramMaxBucketNumber, the - // resolution of the histogram is reduced by doubling the width of the - // sparse buckets (up to a growth factor between one bucket to the next - // of 2^(2^4) = 65536, see above). + // enacted: + // - First, if the last reset (or the creation) of the histogram is at + // least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). + // - If less time has passed, or if NativeHistogramMinResetDuration is + // zero, no reset is performed. Instead, the zero threshold is + // increased sufficiently to reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. 
+ // - After that, if the number of buckets still exceeds + // NativeHistogramMaxBucketNumber, the resolution of the histogram is + // reduced by doubling the width of the sparse buckets (up to a + // growth factor between one bucket to the next of 2^(2^4) = 65536, + // see above). + // - Any increased zero threshold or reduced resolution is reset back + // to their original values once NativeHistogramMinResetDuration has + // passed (since the last reset or the creation of the histogram). NativeHistogramMaxBucketNumber uint32 NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time +} + +// HistogramVecOpts bundles the options to create a HistogramVec metric. +// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type HistogramVecOpts struct { + HistogramOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -488,11 +508,11 @@ func NewHistogram(opts HistogramOpts) Histogram { } func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { + for _, n := range desc.variableLabels.names { if n == bucketLabel { panic(errBucketLabelNotAllowed) } @@ -503,6 +523,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } + if opts.now == nil { + opts.now = time.Now + } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -510,8 +534,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: time.Now(), - now: time.Now, + lastResetTime: opts.now(), + now: opts.now, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -544,16 +568,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } - h.counts[1] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } + h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema) + h.counts[1] 
= &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema) h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. @@ -632,8 +652,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { if frac == 0.5 { key-- } - div := 1 << -schema - key = (key + div - 1) / div + offset := (1 << -schema) - 1 + key = (key + offset) >> -schema } if isInf { key++ @@ -694,9 +714,11 @@ type histogram struct { nativeHistogramMaxZeroThreshold float64 nativeHistogramMaxBuckets uint32 nativeHistogramMinResetDuration time.Duration - lastResetTime time.Time // Protected by mtx. + // lastResetTime is protected by mtx. It is also used as created timestamp. + lastResetTime time.Time - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. + now func() time.Time } func (h *histogram) Desc() *Desc { @@ -735,9 +757,10 @@ func (h *histogram) Write(out *dto.Metric) error { waitForCooldown(count, coldCounts) his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: timestamppb.New(h.lastResetTime), } out.Histogram = his out.Label = h.labelPairs @@ -775,6 +798,16 @@ func (h *histogram) Write(out *dto.Metric) error { his.ZeroCount = proto.Uint64(zeroBucket) his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) + + // Add a no-op span to a histogram without observations and with + // a zero threshold of zero. Otherwise, a native histogram would + // look like a classic histogram to scrapers. + if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 { + his.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } } addAndResetCounts(hotCounts, coldCounts) return nil @@ -810,7 +843,7 @@ func (h *histogram) observe(v float64, bucket int) { } } -// limitSparsebuckets applies a strategy to limit the number of populated sparse +// limitBuckets applies a strategy to limit the number of populated sparse // buckets. It's generally best effort, and there are situations where the // number can go higher (if even the lowest resolution isn't enough to reduce // the number sufficiently, or if the provided counts aren't fully updated yet @@ -847,20 +880,23 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration +// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration // has been passed. It returns true if the histogram has been reset. The caller // must have locked h.mtx. 
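Illustrative aside (not part of the vendored diff, which continues below): a minimal sketch of the native-histogram options documented in the HistogramOpts comments above, as a downstream caller might set them. The metric name, bucket factor, and limits are hypothetical values.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// With NativeHistogramBucketFactor > 1, observations also feed sparse,
	// exponentially sized buckets; the limits below bound bucket growth and
	// allow a full reset after an hour, per the option docs above.
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "request_duration_seconds",
		Help:                            "Request latency.",
		Buckets:                         prometheus.DefBuckets, // classic buckets kept alongside, per the docs
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.42)
}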
-func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { +func (h *histogram) maybeReset( + hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, +) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + if h.nativeHistogramMinResetDuration == 0 || + h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } // Completely reset coldCounts. h.resetCounts(cold) // Repeat the latest observation to not lose it completely. cold.observe(value, bucket, true) - // Make coldCounts the new hot counts while ressetting countAndHotIdx. + // Make coldCounts the new hot counts while resetting countAndHotIdx. n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) count := n & ((1 << 63) - 1) waitForCooldown(count, hot) @@ -1034,15 +1070,23 @@ type HistogramVec struct { // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( + return V2.NewHistogramVec(HistogramVecOpts{ + HistogramOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. +func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &HistogramVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) + return newHistogram(desc, opts.HistogramOpts, lvs...) }), } } @@ -1161,6 +1205,7 @@ type constHistogram struct { sum float64 buckets map[float64]uint64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (h *constHistogram) Desc() *Desc { @@ -1168,7 +1213,9 @@ func (h *constHistogram) Desc() *Desc { } func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} + his := &dto.Histogram{ + CreatedTimestamp: h.createdTs, + } buckets := make([]*dto.Bucket, 0, len(h.buckets)) @@ -1215,7 +1262,7 @@ func NewConstHistogram( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constHistogram{ @@ -1309,7 +1356,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { // Multiple spans with only small gaps in between are probably // encoded more efficiently as one larger span with a few empty // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one ore two buckets should not create + // we assume that gaps of one or two buckets should not create // a new span. 
iDelta := int32(i - nextI) if n == 0 || iDelta > 2 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd0750f2cf..a595a20362 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -14,7 +14,7 @@ // It provides tools to compare sequences of strings and generate textual diffs. // // Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no loger maintained. +// (https://github.com/pmezard/go-difflib) is no longer maintained. package internal import ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c1b8fad36a..b3c4eca2bc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,6 +32,104 @@ import ( // create a Desc. type Labels map[string]string +// LabelConstraint normalizes label values. +type LabelConstraint func(string) string + +// ConstrainedLabels represents a label name and its constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabel struct { + Name string + Constraint LabelConstraint +} + +// ConstrainableLabels is an interface that allows creating of labels that can +// be optionally constrained. +// +// prometheus.V2().NewCounterVec(CounterVecOpts{ +// CounterOpts: {...}, // Usual CounterOpts fields +// VariableLabels: []ConstrainedLabels{ +// {Name: "A"}, +// {Name: "B", Constraint: func(v string) string { ... }}, +// }, +// }) +type ConstrainableLabels interface { + compile() *compiledLabels + labelNames() []string +} + +// ConstrainedLabels represents a collection of label name -> constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabels []ConstrainedLabel + +func (cls ConstrainedLabels) compile() *compiledLabels { + compiled := &compiledLabels{ + names: make([]string, len(cls)), + labelConstraints: map[string]LabelConstraint{}, + } + + for i, label := range cls { + compiled.names[i] = label.Name + if label.Constraint != nil { + compiled.labelConstraints[label.Name] = label.Constraint + } + } + + return compiled +} + +func (cls ConstrainedLabels) labelNames() []string { + names := make([]string, len(cls)) + for i, label := range cls { + names[i] = label.Name + } + return names +} + +// UnconstrainedLabels represents collection of label without any constraint on +// their value. Thus, it is simply a collection of label names. 
+// +// UnconstrainedLabels([]string{ "A", "B" }) +// +// is equivalent to +// +// ConstrainedLabels { +// { Name: "A" }, +// { Name: "B" }, +// } +type UnconstrainedLabels []string + +func (uls UnconstrainedLabels) compile() *compiledLabels { + return &compiledLabels{ + names: uls, + } +} + +func (uls UnconstrainedLabels) labelNames() []string { + return uls +} + +type compiledLabels struct { + names []string + labelConstraints map[string]LabelConstraint +} + +func (cls *compiledLabels) compile() *compiledLabels { + return cls +} + +func (cls *compiledLabels) labelNames() []string { + return cls.names +} + +func (cls *compiledLabels) constrain(labelName, value string) string { + if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { + return fn(value) + } + return value +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index b5119c5041..f018e57237 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -20,11 +20,9 @@ import ( "strings" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. @@ -94,6 +92,9 @@ type Opts struct { // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // BuildFQName joins the given three name components by "_". Empty name diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index a4cc9810b0..09b8d2fbea 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -37,6 +37,7 @@ import ( "fmt" "io" "net/http" + "strconv" "strings" "sync" "time" @@ -47,9 +48,10 @@ import ( ) const ( - contentTypeHeader = "Content-Type" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" + processStartTimeHeader = "Process-Start-Time-Unix" ) var gzipPool = sync.Pool{ @@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if !opts.ProcessStartTime.IsZero() { + rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) + } if inFlightSem != nil { select { case inFlightSem <- struct{}{}: // All good, carry on. @@ -366,6 +371,14 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // ProcessStartTime allows setting process start timevalue that will be exposed + // with "Process-Start-Time-Unix" response header along with the metrics + // payload. 
This allow callers to have efficient transformations to cumulative + // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per + // scrape target. + // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus + // exposition format. + ProcessStartTime time.Time } // gzipAccepted returns whether the client will accept gzip-encoded content. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 2108678162..d3482c40ca 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou o.apply(rtOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - addWithExemplar( - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - 1, - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context())) } return resp, err } @@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT o.apply(rtOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - observeWithExemplar( - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - time.Since(start).Seconds(), - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context())) } return resp, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index cca67a78a9..356edb7868 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. 
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { @@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } @@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } @@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - observeWithExemplar( - obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, status, hOpts.extraMethods...) 
+ for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) }) next.ServeHTTP(d, r) } @@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } @@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(d.Written()), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context())) }) } @@ -380,16 +389,13 @@ func isLabelCurried(c prometheus.Collector, label string) bool { return true } -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. -var emptyLabels = prometheus.Labels{} - func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { - if !(code || method) { - return emptyLabels - } labels := prometheus.Labels{} + if !(code || method) { + return labels + } + if code { labels["code"] = sanitizeCode(status) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index c590d912c9..5d4383aa14 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -24,14 +24,32 @@ type Option interface { apply(*options) } +// LabelValueFromCtx are used to compute the label value from request context. 
+// Context can be filled with values from request through middleware. +type LabelValueFromCtx func(ctx context.Context) string + // options store options for both a handler or round tripper. type options struct { - extraMethods []string - getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraLabelsFromCtx map[string]LabelValueFromCtx } func defaultOptions() *options { - return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} + return &options{ + getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }, + extraLabelsFromCtx: map[string]LabelValueFromCtx{}, + } +} + +func (o *options) emptyDynamicLabels() prometheus.Labels { + labels := prometheus.Labels{} + + for label := range o.extraLabelsFromCtx { + labels[label] = "" + } + + return labels } type optionApplyFunc func(*options) @@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option { }) } -// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. -// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric -// will get instrumented without exemplar. +// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics. +// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but +// metric will continue to observe/increment. func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { return optionApplyFunc(func(o *options) { o.getExemplarFn = getExemplarFn }) } + +// WithLabelFromCtx registers a label for dynamic resolution with access to context. +// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage +func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option { + return optionApplyFunc(func(o *options) { + o.extraLabelsFromCtx[name] = valueFn + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 09e34d307c..5e2ced25a0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -21,18 +21,17 @@ import ( "path/filepath" "runtime" "sort" + "strconv" "strings" "sync" "unicode/utf8" - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/client_golang/prometheus/internal" + + "github.com/cespare/xxhash/v2" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" ) const ( @@ -549,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { goroutineBudget-- runtime.Gosched() } - // Once both checkedMetricChan and uncheckdMetricChan are closed + // Once both checkedMetricChan and uncheckedMetricChan are closed // and drained, the contraption above will nil out cmc and umc, // and then we can leave the collect loop here. 
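Editor's note: the instrument_server.go changes above (dynamic labels resolved from the request context and curried before the usual label check) and the two options defined here are easiest to see together. Below is a minimal usage sketch, not taken from the patch; the tenant middleware, its ctxKey, and traceIDFromCtx are assumptions, while InstrumentHandlerCounter, WithLabelFromCtx, and WithExemplarFromContext come from the code above. Note that the dynamic label name must be declared on the vector, because the handler curries it with an empty value before checking the remaining labels.

    package instrumentation

    import (
        "context"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // ctxKey is a hypothetical key under which some earlier middleware stores the tenant ID.
    type ctxKey struct{}

    // traceIDFromCtx stands in for whatever tracing integration is in use.
    func traceIDFromCtx(ctx context.Context) string { return "" }

    func NewInstrumentedHandler(next http.Handler) http.Handler {
        requests := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "HTTP requests partitioned by status code, method and tenant.",
        }, []string{"code", "method", "tenant"}) // "tenant" is the dynamic label
        prometheus.MustRegister(requests)

        return promhttp.InstrumentHandlerCounter(requests, next,
            // Resolved per request from the context; missing values fall back to "unknown".
            promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
                if t, ok := ctx.Value(ctxKey{}).(string); ok {
                    return t
                }
                return "unknown"
            }),
            // Exemplars are attached only when non-nil labels are returned and the
            // metric/exposition format supports them; counting proceeds either way.
            promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
                if id := traceIDFromCtx(ctx); id != "" {
                    return prometheus.Labels{"trace_id": id}
                }
                return nil
            }),
        )
    }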
if cmc == nil && umc == nil { @@ -933,6 +932,10 @@ func checkMetricConsistency( h.WriteString(lp.GetValue()) h.Write(separatorByteSlice) } + if dtoMetric.TimestampMs != nil { + h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) + h.Write(separatorByteSlice) + } hSum := h.Sum64() if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( @@ -960,7 +963,7 @@ func checkDescConsistency( // Is the desc consistent with the content of the metric? lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { + for _, l := range desc.variableLabels.names { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), }) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 7bc448a893..1462704446 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -22,11 +22,11 @@ import ( "sync/atomic" "time" - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "github.com/beorn7/perks/quantile" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // quantileLabel is used for the label that defines the quantile in a @@ -146,6 +146,21 @@ type SummaryOpts struct { // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time +} + +// SummaryVecOpts bundles the options to create a SummaryVec metric. +// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type SummaryVecOpts struct { + SummaryOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels } // Problem with the sliding-window decay algorithm... The Merge method of @@ -177,11 +192,11 @@ func NewSummary(opts SummaryOpts) Summary { } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { + for _, n := range desc.variableLabels.names { if n == quantileLabel { panic(errQuantileLabelNotAllowed) } @@ -211,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if opts.now == nil { + opts.now = time.Now + } if len(opts.Objectives) == 0 { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ @@ -219,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. 
+ s.createdTs = timestamppb.New(opts.now()) return s } @@ -234,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } - s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.headStreamExpTime = opts.now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { @@ -248,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { sort.Float64s(s.sortedObjectives) s.init(s) // Init self-collection. + s.createdTs = timestamppb.New(opts.now()) return s } @@ -275,6 +295,8 @@ type summary struct { headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time + + createdTs *timestamppb.Timestamp } func (s *summary) Desc() *Desc { @@ -296,7 +318,9 @@ func (s *summary) Observe(v float64) { } func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() @@ -429,6 +453,8 @@ type noObjectivesSummary struct { counts [2]*summaryCounts labelPairs []*dto.LabelPair + + createdTs *timestamppb.Timestamp } func (s *noObjectivesSummary) Desc() *Desc { @@ -479,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { } sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: s.createdTs, } out.Summary = sum @@ -530,20 +557,28 @@ type SummaryVec struct { // it is handled by the Prometheus server internally, “quantile” is an illegal // label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { + return V2.NewSummaryVec(SummaryVecOpts{ + SummaryOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. +func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { + for _, ln := range opts.VariableLabels.labelNames() { if ln == quantileLabel { panic(errQuantileLabelNotAllowed) } } - desc := NewDesc( + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &SummaryVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) + return newSummary(desc, opts.SummaryOpts, lvs...) 
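Editor's note: NewSummaryVec is now a thin wrapper over the experimental V2.NewSummaryVec, which takes the label set as ConstrainableLabels via SummaryVecOpts. A short sketch of calling the V2 constructor directly, using the same UnconstrainedLabels helper the wrapper uses; the metric name, help text, and objectives are illustrative only.

    package metrics

    import "github.com/prometheus/client_golang/prometheus"

    // rpcLatency is built through the V2 API; UnconstrainedLabels reproduces the
    // behaviour of the classic NewSummaryVec(opts, []string{...}) constructor.
    var rpcLatency = prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
        SummaryOpts: prometheus.SummaryOpts{
            Name:       "rpc_latency_seconds",
            Help:       "RPC latency, partitioned by method.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        },
        VariableLabels: prometheus.UnconstrainedLabels([]string{"method"}),
    })

    func observeCall() {
        rpcLatency.WithLabelValues("Get").Observe(0.042)
    }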
}), } } @@ -662,6 +697,7 @@ type constSummary struct { sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (s *constSummary) Desc() *Desc { @@ -669,7 +705,9 @@ func (s *constSummary) Desc() *Desc { } func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) @@ -718,7 +756,7 @@ func NewConstSummary( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constSummary{ diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index f28a76f3a6..52344fef53 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -23,7 +23,9 @@ type Timer struct { } // NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the +// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar +// later on will be also supported. +// Timer is usually used to time a function call in the // following way: // // func TimeMe() { @@ -31,6 +33,14 @@ type Timer struct { // defer timer.ObserveDuration() // // Do actual work. // } +// +// or +// +// func TimeMeWithExemplar() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDurationWithExemplar(exemplar) +// // Do actual work. +// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), @@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration { } return d } + +// ObserveDurationWithExemplar is like ObserveDuration, but it will also +// observe exemplar with the duration unless exemplar is nil or provided Observer can't +// be casted to ExemplarObserver. +func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration { + d := time.Since(t.begin) + eo, ok := t.observer.(ExemplarObserver) + if ok && exemplar != nil { + eo.ObserveWithExemplar(d.Seconds(), exemplar) + return d + } + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 2d3abc1cbd..cc23011fad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,18 +14,17 @@ package prometheus import ( + "errors" "fmt" "sort" "time" "unicode/utf8" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // ValueType is an enumeration of metric types that represent a simple value. 
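Editor's note: ObserveDurationWithExemplar, added above, records the exemplar only when the underlying Observer implements ExemplarObserver and the exemplar is non-nil; otherwise it degrades to a plain Observe. A sketch assuming the caller already has a trace ID to hand; the histogram name and trace_id label are illustrative.

    package metrics

    import "github.com/prometheus/client_golang/prometheus"

    var handleSeconds = prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:    "handle_seconds",
        Help:    "Time spent in handle().",
        Buckets: prometheus.DefBuckets,
    })

    func handle(traceID string) {
        timer := prometheus.NewTimer(handleSeconds)
        // Histograms implement ExemplarObserver, so the exemplar is recorded;
        // with a nil map or a plain summary this falls back to Observe.
        defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})

        // ... actual work ...
    }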
@@ -93,7 +92,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -107,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { return nil, err } @@ -132,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal return m } +// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters +// with created timestamp set and returns an error for other metric types. +func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + switch valueType { + case CounterValue: + break + default: + return nil, errors.New("created timestamps are only supported for counters") + } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { + return nil, err + } + + return &constMetric{ + desc: desc, + metric: metric, + }, nil +} + +// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where +// NewConstMetricWithCreatedTimestamp would have returned an error. +func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { + m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type constMetric struct { desc *Desc metric *dto.Metric @@ -155,11 +191,12 @@ func populateMetric( labelPairs []*dto.LabelPair, e *dto.Exemplar, m *dto.Metric, + ct *timestamppb.Timestamp, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -178,19 +215,19 @@ func populateMetric( // This function is only needed for custom Metric implementations. See MetricVec // example. func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. return nil } - if len(desc.variableLabels) == 0 { + if len(desc.variableLabels.names) == 0 { // Moderately fast path. 
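Editor's note: NewConstMetricWithCreatedTimestamp (and its Must variant) only accept CounterValue and thread the timestamp through populateMetric into dto.Counter. A sketch of the intended use from a custom collector; the descriptor, the "queue" label, and the counting function are assumptions.

    package metrics

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    var jobsDesc = prometheus.NewDesc(
        "jobs_processed_total",
        "Jobs processed since counting started.",
        []string{"queue"}, nil,
    )

    type jobCollector struct {
        started time.Time                  // becomes the counter's created timestamp
        count   func(queue string) float64 // supplied by the exporter
    }

    func (c jobCollector) Describe(ch chan<- *prometheus.Desc) { ch <- jobsDesc }

    func (c jobCollector) Collect(ch chan<- prometheus.Metric) {
        // Panics for non-counter value types or mismatched label cardinality,
        // mirroring MustNewConstMetric.
        ch <- prometheus.MustNewConstMetricWithCreatedTimestamp(
            jobsDesc, prometheus.CounterValue, c.count("default"), c.started, "default",
        )
    }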
return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { + for i, l := range desc.variableLabels.names { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), + Name: proto.String(l), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 7ae322590c..955cfd59f8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -72,6 +72,8 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + lvs = constrainLabelValues(m.desc, lvs, m.curry) + h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -91,6 +93,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { + labels, closer := constrainLabels(m.desc, labels) + defer closer() + h, err := m.hashLabels(labels) if err != nil { return false @@ -106,6 +111,9 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { + labels, closer := constrainLabels(m.desc, labels) + defer closer() + return m.metricMap.deleteByLabels(labels, m.curry) } @@ -144,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { oldCurry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label) + return nil, fmt.Errorf("label name %q is already curried", labelName) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -156,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. } - newCurry = append(newCurry, curriedLabelValue{i, val}) + newCurry = append(newCurry, curriedLabelValue{ + i, + m.desc.variableLabels.constrain(labelName, val), + }) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -199,6 +210,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // a wrapper around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return nil, err @@ -224,6 +236,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. 
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + labels, closer := constrainLabels(m.desc, labels) + defer closer() + h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -233,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -242,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { curry = m.curry iVals, iCurry int ) - for i := 0; i < len(m.desc.variableLabels); i++ { + for i := 0; i < len(m.desc.variableLabels.names); i++ { if iCurry < len(curry) && curry[iCurry].index == i { h = m.hashAdd(h, curry[iCurry].value) iCurry++ @@ -256,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { } func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -265,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { curry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label) + return 0, fmt.Errorf("label name %q is already curried", labelName) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) + return 0, fmt.Errorf("label name %q missing in label map", labelName) } h = m.hashAdd(h, val) } @@ -453,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. 
@@ -597,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe return false } iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { if values[i] != curry[iCurry].value { return false @@ -615,7 +630,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { labelValues := make([]string, len(labels)+len(curry)) iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ @@ -640,3 +655,55 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { } return labelValues } + +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} + +func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return labels, func() {} + } + + constrainedLabels := labelsPool.Get().(Labels) + for l, v := range labels { + constrainedLabels[l] = desc.variableLabels.constrain(l, v) + } + + return constrainedLabels, func() { + for k := range constrainedLabels { + delete(constrainedLabels, k) + } + labelsPool.Put(constrainedLabels) + } +} + +func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return lvs + } + + constrainedValues := make([]string, len(lvs)) + var iCurry, iLVs int + for i := 0; i < len(lvs)+len(curry); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + iCurry++ + continue + } + + if i < len(desc.variableLabels.names) { + constrainedValues[iLVs] = desc.variableLabels.constrain( + desc.variableLabels.names[i], + lvs[iLVs], + ) + } else { + constrainedValues[iLVs] = lvs[iLVs] + } + iLVs++ + } + return constrainedValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go new file mode 100644 index 0000000000..42bc3a8f06 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +type v2 struct{} + +// V2 is a struct that can be referenced to access experimental API that might +// be present in v2 of client golang someday. It offers extended functionality +// of v1 with slightly changed API. It is acceptable to use some pieces from v1 +// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc` +// in the same codebase. 
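Editor's note: the constrainLabels/constrainLabelValues plumbing above only does work when the Desc carries label constraints, which callers attach through the V2 constructors and the ConstrainableLabels types from labels.go (part of this bump, but not shown in this hunk). A sketch assuming the ConstrainedLabel{Name, Constraint} shape from that file; the lower-casing rule is an arbitrary example.

    package metrics

    import (
        "strings"

        "github.com/prometheus/client_golang/prometheus"
    )

    // Every "method" value is normalized before hashing, currying, or lookup,
    // so "GET", "Get", and "get" all land in the same child series.
    var reqDurations = prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
        SummaryOpts: prometheus.SummaryOpts{
            Name: "request_duration_seconds",
            Help: "Request duration, partitioned by normalized method.",
        },
        VariableLabels: prometheus.ConstrainedLabels{
            {Name: "method", Constraint: strings.ToLower},
        },
    })

    func observe() {
        reqDurations.WithLabelValues("GET").Observe(0.01)                   // stored as method="get"
        reqDurations.With(prometheus.Labels{"method": "Get"}).Observe(0.02) // same series
    }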
+var V2 = v2{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 1498ee144c..25da157f15 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,12 +17,10 @@ import ( "fmt" "sort" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" + "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "google.golang.org/protobuf/proto" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { constLabels[ln] = lv } // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) // Propagate errors if there was any. This will override any errer // created by NewDesc above, i.e. earlier errors get precedence. if desc.err != nil { diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 35904ea198..cee360db7f 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,25 +1,38 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 // source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 @@ -38,23 +51,25 @@ const ( MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, -} +// Enum value maps for MetricType. +var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -63,449 +78,546 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() +} + +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] +} + +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. 
func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) -} -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{1} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{2} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar + } + return nil +} + +func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp } return nil } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
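Editor's note: with the regenerated client_model, dto.Counter (and Summary/Histogram below) gain a created_timestamp field, which the client_golang changes earlier in this patch start populating. A sketch of reading it back out of a Gather call; the metric name is illustrative, and the nil check covers metric types or producers that do not set the field.

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        reg := prometheus.NewRegistry()
        events := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "events_total",
            Help: "Example counter.",
        })
        reg.MustRegister(events)
        events.Inc()

        mfs, err := reg.Gather()
        if err != nil {
            panic(err)
        }
        for _, mf := range mfs {
            for _, m := range mf.GetMetric() {
                // GetCreatedTimestamp returns nil when the field is absent.
                if ct := m.GetCounter().GetCreatedTimestamp(); ct != nil {
                    fmt.Printf("%s created at %s\n", mf.GetName(), ct.AsTime())
                }
            }
        }
    }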
func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{3} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} } -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{4} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile + } + return nil +} + +func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp } return nil } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{5} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} } -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). // In the future, more bucket schemas may be added using numbers < -4 or > 8. Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. // Negative buckets for the native histogram. 
NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. - NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. 
func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{6} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Histogram) GetSampleCountFloat() float64 { - if m != nil && m.SampleCountFloat != nil { - return *m.SampleCountFloat +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket } return nil } -func (m *Histogram) GetSchema() int32 { - if m != nil && m.Schema != nil { - return *m.Schema +func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema } return 0 } -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil && m.ZeroThreshold != nil { - return *m.ZeroThreshold +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold } return 0 } -func (m *Histogram) GetZeroCount() uint64 { - if m != nil && m.ZeroCount != nil { - return *m.ZeroCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + return *x.ZeroCount } return 0 } -func (m *Histogram) GetZeroCountFloat() float64 { - if m != nil && m.ZeroCountFloat != nil { - return *m.ZeroCountFloat +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetNegativeSpan() []*BucketSpan { - if m != nil { - return m.NegativeSpan +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -func (m *Histogram) GetNegativeDelta() []int64 { - if m != nil { - return m.NegativeDelta +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta } return nil } -func (m *Histogram) GetNegativeCount() []float64 { - if m != nil { - return m.NegativeCount +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount } return nil } -func (m *Histogram) GetPositiveSpan() []*BucketSpan { - if m != nil { - return m.PositiveSpan +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan } return nil } -func (m 
*Histogram) GetPositiveDelta() []int64 { - if m != nil { - return m.PositiveDelta +func (x *Histogram) GetPositiveDelta() []int64 { + if x != nil { + return x.PositiveDelta } return nil } -func (m *Histogram) GetPositiveCount() []float64 { - if m != nil { - return m.PositiveCount +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount } return nil } @@ -513,64 +625,72 @@ func (m *Histogram) GetPositiveCount() []float64 { // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{7} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount } return 0 } -func (m *Bucket) GetCumulativeCountFloat() float64 { - if m != nil && m.CumulativeCountFloat != nil { - return *m.CumulativeCountFloat +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } @@ -582,333 +702,675 @@ func (m *Bucket) GetExemplar() *Exemplar { // structured here (with all the buckets in a single array separate // from the Spans). type BucketSpan struct { - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. } -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{8} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} } -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketSpan.Unmarshal(m, b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) -} -func (m *BucketSpan) XXX_Size() int { - return xxx_messageInfo_BucketSpan.Size(m) -} -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) -} - -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo - -func (m *BucketSpan) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset } return 0 } -func (m *BucketSpan) GetLength() uint32 { - if m != nil && m.Length != nil { - return *m.Length +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length } return 0 } type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{9} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` } -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + 
} + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{10} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` } -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricFamily) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricFamily) ProtoMessage() {} + +func 
(x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{11} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, + 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 
0x0a, 0x09, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47, + 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, + 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, + 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, + 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 
0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, + 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, + 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, + 0x05, 0x6c, 0x61, 0x62, 0x65, 
0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, + 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, + 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, + 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, + 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 
0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, + 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, } -func init() { - proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData } -var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, - 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, - 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, - 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, - 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, - 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, - 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, - 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, - 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, - 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, - 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, - 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, - 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, - 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, - 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, - 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, - 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, - 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, - 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, - 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, - 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, - 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, - 0xef, 0x62, 0x53, 0xa1, 
0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, - 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, - 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, - 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, - 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, - 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, - 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, - 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, - 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, - 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, - 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, - 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, - 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, - 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, - 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, - 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, - 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, - 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, - 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, - 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, - 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, - 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, - 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, - 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, - 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, - 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, - 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, - 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, - 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, - 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, - 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, - 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, - 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: 
io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp + 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp + 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp + 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields 
+ default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go 
b/vendor/github.com/prometheus/common/expfmt/decode.go index f4fc884552..9063978151 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -132,7 +132,10 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error { } // Pick off one MetricFamily per Decode until there's nothing left. for key, fam := range d.fams { - *v = *fam + v.Name = fam.Name + v.Help = fam.Help + v.Type = fam.Type + v.Metric = fam.Metric delete(d.fams, key) return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 64dc0eb40c..7f611ffaad 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,9 +18,9 @@ import ( "io" "net/http" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/prototext" dto "github.com/prometheus/client_model/go" ) @@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { + if ver == OpenMetricsVersion_1_0_0 { + return FmtOpenMetrics_1_0_0 + } + return FmtOpenMetrics_0_0_1 } } return FmtText @@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case FmtProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + _, err := fmt.Fprintln(w, prototext.Format(v)) return err }, close: func() error { return nil }, @@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { }, close: func() error { return nil }, } - case FmtOpenMetrics: + case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: return encoderCloser{ encode: func(v *dto.MetricFamily) error { _, err := MetricFamilyToOpenMetrics(w, v) diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 0f176fa64f..c4cb20f0d3 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,20 +19,22 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion_0_0_1 = "0.0.1" + OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. 
- FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ac2482782c..35db1cc9d7 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -24,8 +24,8 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // A stateFn is a function that represents a state in a state machine. By diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index a197699a1e..c24864a927 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -2,6 +2,7 @@ linters: enable: - godot + - misspell - revive linter-settings: @@ -10,3 +11,5 @@ linter-settings: exclude: # Ignore "See: URL" - 'See:' + misspell: + locale: US diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index e358db69c5..062a281856 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,19 +49,19 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.14.0 +PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.49.0 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -176,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . @@ -205,7 +207,7 @@ common-tarball: promu .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ @@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 43c37735a7..1224816c2a 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory extracting the ttar file using `make 
fixtures/.unpacked` or just `make test`. ```bash -rm -rf fixtures +rm -rf testdata/fixtures make test ``` Next, make the required changes to the extracted files in the `fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. +`git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 68f36e888f..28783e2ddc 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index f5b7939b26..4a173636c9 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { parts := strings.Fields(line) if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } node := strings.TrimRight(parts[1], ",") @@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { bucketCount = arraySize } else { if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } } @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 06968ca2ed..f4f5501c68 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + 
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { if strings.HasPrefix(line, "processor") { match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } cpu := commonCPUInfo v, err := strconv.ParseUint(match[1], 0, 32) @@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 5048ad1f21..9a73e26393 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) } 
return crypto, nil @@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) + return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 0102ab0fd8..4980c875bf 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -20,7 +20,8 @@ import ( // FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. type FS struct { - proc fs.FS + proc fs.FS + isReal bool } // DefaultMountPoint is the common mount point of the proc filesystem. @@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) { if err != nil { return FS{}, err } - return FS{fs}, nil + + isReal, err := isRealProc(mountPoint) + if err != nil { + return FS{}, err + } + + return FS{fs, isReal}, nil } diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go new file mode 100644 index 0000000000..134767d69a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -0,0 +1,23 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !freebsd && !linux +// +build !freebsd,!linux + +package procfs + +// isRealProc returns true on architectures that don't have a Type argument +// in their Statfs_t struct +func isRealProc(mountPoint string) (bool, error) { + return true, nil +} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go new file mode 100644 index 0000000000..80df79c319 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build freebsd || linux +// +build freebsd linux + +package procfs + +import ( + "syscall" +) + +// isRealProc determines whether supplied mountpoint is really a proc filesystem. 
+func isRealProc(mountPoint string) (bool, error) { + stat := syscall.Statfs_t{} + err := syscall.Statfs(mountPoint, &stat) + if err != nil { + return false, err + } + + // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 + return stat.Type == 0x9fa0, nil +} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f8070e6e2b..f560a8db30 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { @@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { for s.Scan() { fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) } switch fields[0] { diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index b030951faf..14272dc788 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) { return us, nil } +// Parses a uint64 from given hex in string. +func ParseHexUint64s(ss []string) ([]*uint64, error) { + us := make([]*uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + // ReadUintFromFile reads a file and attempts to parse a uint64 from it. 
func ReadUintFromFile(path string) (uint64, error) { data, err := os.ReadFile(path) diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 391c07957e..5a145bbfe1 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + return nil, 0, + fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 0096cafbdf..59465c5bbc 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { loads := make([]float64, 3) parts := strings.Fields(string(loadavgBytes)) if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) } var err error for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) + return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index a95c889cb9..fdd4b95445 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,13 +90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) + return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } // Failed disks have the suffix (F) & Spare disks have the suffix (S). 
@@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) + return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } else { syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) @@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = 
strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } return syncedBlocks, pct, finish, speed, nil diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index f65e174e57..eaf00e2248 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) } return *m, nil @@ -165,7 +165,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { // Each line has at least a name and value; we ignore the unit. 
fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) } v, err := strconv.ParseUint(fields[1], 0, 64) diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 59f4d50558..388ebf396d 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) } if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) } mount := &MountInfo{ @@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mount.MountID, err = strconv.Atoi(mountInfo[0]) if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") + return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) } mount.ParentID, err = strconv.Atoi(mountInfo[1]) if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") + return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) } // Has optional fields, which is a space separated list of values. // Example: shared:2 master:7 if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 0c482c18cc..9d8af6db74 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -186,6 +194,8 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestMilliseconds uint64 + // The average time from the point the client sends RPC requests until it receives the response. + AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. Errors uint64 } @@ -231,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. 
+ // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -264,7 +301,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { if len(ss) > deviceEntryLen { // Only NFSv3 and v4 are supported for parsing statistics if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) } statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) @@ -288,7 +325,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { // device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } // Check for specific words appearing at specific indices to ensure @@ -306,7 +343,7 @@ func parseMount(ss []string) (*Mount, error) { for _, f := range format { if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } } @@ -343,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e switch ss[0] { case fieldOpts: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } if stats.Opts == nil { stats.Opts = map[string]string{} @@ -358,7 +395,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } case fieldAge: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -369,7 +406,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { @@ -379,7 +416,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { @@ -389,7 +426,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Events = *estats case fieldTransport: if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: 
%v", ErrFileParse, ss) } tstats, err := parseNFSTransportStats(ss[1:], statVersion) @@ -428,7 +465,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e // integer fields. func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldBytesLen) @@ -457,7 +494,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { // integer fields. func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldEventsLen) @@ -521,7 +558,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) } // Skip string operation name for integers @@ -534,7 +571,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - opStats := NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], @@ -546,6 +582,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } + if ns[0] != 0 { + opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) + } if len(ns) > 8 { opStats.Errors = ns[8] @@ -572,10 +611,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } else if protocol == "udp" { expectedLength = fieldTransport10UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) } case statVersion11: var expectedLength int @@ -583,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -600,7 +642,9 
@@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -618,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -632,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 8300daca05..fdfa456119 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "io" - "strconv" "strings" "github.com/prometheus/procfs/internal/util" @@ -28,9 +27,13 @@ import ( // and contains netfilter conntrack statistics at one CPU core. type ConntrackStatEntry struct { Entries uint64 + Searched uint64 Found uint64 + New uint64 Invalid uint64 Ignore uint64 + Delete uint64 + DeleteList uint64 Insert uint64 InsertFailed uint64 Drop uint64 @@ -55,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -81,73 +84,35 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { // Parses a ConntrackStatEntry from given array of fields. 
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) + entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err + numEntries := len(entries) + if numEntries < 16 || numEntries > 17 { + return nil, + fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) } - entry.Found = found - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err + stats := &ConntrackStatEntry{ + Entries: *entries[0], + Searched: *entries[1], + Found: *entries[2], + New: *entries[3], + Invalid: *entries[4], + Ignore: *entries[5], + Delete: *entries[6], + DeleteList: *entries[7], + Insert: *entries[8], + InsertFailed: *entries[9], + Drop: *entries[10], + EarlyDrop: *entries[11], } - entry.Invalid = invalid - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err + // Ignore missing search_restart on Linux < 2.6.35. + if numEntries == 17 { + stats.SearchRestart = *entries[16] } - entry.Ignore = ignore - insert, err := parseConntrackStatField(fields[8]) - if err != nil { - return nil, err - } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err - } - entry.InsertFailed = insertFailed - - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err - } - entry.Drop = drop - - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err - } - entry.EarlyDrop = earlyDrop - - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string. 
-func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) - } - return val, err + return stats, nil } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 7fd57d7f46..4da81ea577 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -130,7 +130,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,7 +144,7 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) } } @@ -153,7 +153,8 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 10 columns %q", + "%w: Less than 10 columns found %q", + ErrFileParse, strings.Join(fields, " "), ) } @@ -162,64 +163,65 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { // sl s := strings.Split(fields[0], ":") if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) } if line.LocalAddr, err = parseIP(l[0]); err != nil { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address r := strings.Split(fields[2], ":") if len(r) != 2 { - return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) } if line.RemAddr, err = parseIP(r[0]); err != nil { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue q := strings.Split(fields[4], ":") if len(q) != 2 { return nil, fmt.Errorf( - "cannot parse tx/rx queues in 
socket line as it has a missing colon %q", + "%w: Missing colon for tx/rx queues in socket line %q", + ErrFileParse, fields[4], ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 374b6f73f8..b6c77b709f 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro } else if fields[6] == disabled { line.Slab = false } else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } else if capabilities[i] == "n" { *capabilityFields[i] = false } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } return nil diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go new file mode 100644 index 0000000000..deb7029fe1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -0,0 +1,143 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const ( + blackholeRepresentation string = "*" + blackholeIfaceName string = "blackhole" + routeLineColumns int = 11 +) + +// A NetRouteLine represents one line from net/route. 
+type NetRouteLine struct { + Iface string + Destination uint32 + Gateway uint32 + Flags uint32 + RefCnt uint32 + Use uint32 + Metric uint32 + Mask uint32 + MTU uint32 + Window uint32 + IRTT uint32 +} + +func (fs FS) NetRoute() ([]NetRouteLine, error) { + return readNetRoute(fs.proc.Path("net", "route")) +} + +func readNetRoute(path string) ([]NetRouteLine, error) { + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + + routelines, err := parseNetRoute(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) + } + return routelines, nil +} + +func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { + var routelines []NetRouteLine + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + routeline, err := parseNetRouteLine(fields) + if err != nil { + return nil, err + } + routelines = append(routelines, *routeline) + } + return routelines, nil +} + +func parseNetRouteLine(fields []string) (*NetRouteLine, error) { + if len(fields) != routeLineColumns { + return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) + } + iface := fields[0] + if iface == blackholeRepresentation { + iface = blackholeIfaceName + } + destination, err := strconv.ParseUint(fields[1], 16, 32) + if err != nil { + return nil, err + } + gateway, err := strconv.ParseUint(fields[2], 16, 32) + if err != nil { + return nil, err + } + flags, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil, err + } + refcnt, err := strconv.ParseUint(fields[4], 10, 32) + if err != nil { + return nil, err + } + use, err := strconv.ParseUint(fields[5], 10, 32) + if err != nil { + return nil, err + } + metric, err := strconv.ParseUint(fields[6], 10, 32) + if err != nil { + return nil, err + } + mask, err := strconv.ParseUint(fields[7], 16, 32) + if err != nil { + return nil, err + } + mtu, err := strconv.ParseUint(fields[8], 10, 32) + if err != nil { + return nil, err + } + window, err := strconv.ParseUint(fields[9], 10, 32) + if err != nil { + return nil, err + } + irtt, err := strconv.ParseUint(fields[10], 10, 32) + if err != nil { + return nil, err + } + routeline := &NetRouteLine{ + Iface: iface, + Destination: uint32(destination), + Gateway: uint32(gateway), + Flags: uint32(flags), + RefCnt: uint32(refcnt), + Use: uint32(use), + Metric: uint32(metric), + Mask: uint32(mask), + MTU: uint32(mtu), + Window: uint32(window), + IRTT: uint32(irtt), + } + return routeline, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index e36f4872dd..360e36af7d 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -16,7 +16,6 @@ package procfs import ( "bufio" "bytes" - "errors" "fmt" "io" "strings" @@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // Expect a minimum of a protocol and one key/value pair. 
fields := strings.Split(s.Text(), " ") if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) } // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. @@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // parseSockstatKVs parses a string slice into a map of key/value pairs. func parseSockstatKVs(kvs []string) (map[string]int, error) { if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") + return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) } // Iterate two values at a time to gather key/value pairs. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 06b7b8f216..c770852919 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil @@ -76,13 +76,14 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { s := bufio.NewScanner(r) var stats []SoftnetStat + cpuIndex := 0 for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) softnetStat := SoftnetStat{} if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) } // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 @@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { softnetStat.SoftnetBacklogLen = us[0] softnetStat.Index = us[1] + } else { + // For older kernels, create the Index based on the scan line number. 
+ softnetStat.Index = uint32(cpuIndex) } softnetStat.Width = width stats = append(stats, softnetStat) + cpuIndex++ } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index 98aa8e1c31..acbbc57eab 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, l := len(fields) if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) } // Field offsets are as follows: @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go new file mode 100644 index 0000000000..7443edca94 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -0,0 +1,182 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Wireless models the content of /proc/net/wireless. +type Wireless struct { + Name string + + // Status is the current 4-digit hex value status of the interface. + Status uint64 + + // QualityLink is the link quality. + QualityLink int + + // QualityLevel is the signal gain (dBm). + QualityLevel int + + // QualityNoise is the signal noise baseline (dBm). + QualityNoise int + + // DiscardedNwid is the number of discarded packets with wrong nwid/essid. + DiscardedNwid int + + // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). + DiscardedCrypt int + + // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. + DiscardedFrag int + + // DiscardedRetry is the number of discarded packets that reached max MAC retries. + DiscardedRetry int + + // DiscardedMisc is the number of discarded packets for other reasons. + DiscardedMisc int + + // MissedBeacon is the number of missed beacons/superframe. + MissedBeacon int +} + +// Wireless returns kernel wireless statistics. +func (fs FS) Wireless() ([]*Wireless, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) + if err != nil { + return nil, err + } + + m, err := parseWireless(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + } + + return m, nil +} + +// parseWireless parses the contents of /proc/net/wireless. +/* +Inter-| sta-| Quality | Discarded packets | Missed | WE +face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 + eth1: 0000 5. -256. -10. 0 1 0 3 0 0 + eth2: 0000 5. -256. -20. 0 2 0 4 0 0 +*/ +func parseWireless(r io.Reader) ([]*Wireless, error) { + var ( + interfaces []*Wireless + scanner = bufio.NewScanner(r) + ) + + for n := 0; scanner.Scan(); n++ { + // Skip the 2 header lines. 
+ if n < 2 { + continue + } + + line := scanner.Text() + + parts := strings.Split(line, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) + } + + name := strings.TrimSpace(parts[0]) + stats := strings.Fields(parts[1]) + + if len(stats) < 10 { + return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) + } + + status, err := strconv.ParseUint(stats[0], 16, 16) + if err != nil { + return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) + } + + qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) + if err != nil { + return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + } + + qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) + if err != nil { + return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + } + + qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) + if err != nil { + return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + } + + dnwid, err := strconv.Atoi(stats[4]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + } + + dcrypt, err := strconv.Atoi(stats[5]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + } + + dfrag, err := strconv.Atoi(stats[6]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + } + + dretry, err := strconv.Atoi(stats[7]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + } + + dmisc, err := strconv.Atoi(stats[8]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + } + + mbeacon, err := strconv.Atoi(stats[9]) + if err != nil { + return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + } + + w := &Wireless{ + Name: name, + Status: status, + QualityLink: qlink, + QualityLevel: qlevel, + QualityNoise: qnoise, + DiscardedNwid: dnwid, + DiscardedCrypt: dcrypt, + DiscardedFrag: dfrag, + DiscardedRetry: dretry, + DiscardedMisc: dmisc, + MissedBeacon: mbeacon, + } + + interfaces = append(interfaces, w) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + } + + return interfaces, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go index f9d9d243db..932ef20468 100644 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) } name := fields[0] diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 5cc40aef55..742dff453b 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,7 +15,6 @@ package procfs import ( "bufio" - "io" "os" "path/filepath" "strconv" @@ -38,12 +37,7 @@ func (fs FS) NetStat() 
([]NetStat, error) { var netStatsTotal []NetStat for _, filePath := range statFiles { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - - procNetstat, err := parseNetstat(file) + procNetstat, err := parseNetstat(filePath) if err != nil { return nil, err } @@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) { // parseNetstat parses the metrics from `/proc/net/stat/` file // and returns a NetStat structure. -func parseNetstat(r io.Reader) (NetStat, error) { - var ( - scanner = bufio.NewScanner(r) - netStat = NetStat{ - Stats: make(map[string][]uint64), - } - ) +func parseNetstat(filePath string) (NetStat, error) { + netStat := NetStat{ + Stats: make(map[string][]uint64), + } + file, err := os.Open(filePath) + if err != nil { + return netStat, err + } + defer file.Close() + scanner := bufio.NewScanner(file) scanner.Scan() // First string is always a header for stats diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index c30223af72..d1f71caa5d 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -15,13 +15,13 @@ package procfs import ( "bytes" + "errors" "fmt" "io" "os" "strconv" "strings" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -30,12 +30,18 @@ type Proc struct { // The process ID. PID int - fs fs.FS + fs FS } // Procs represents a list of Proc structs. type Procs []Proc +var ( + ErrFileParse = errors.New("Error Parsing File") + ErrFileRead = errors.New("Error Reading File") + ErrMountPoint = errors.New("Error Accessing Mount point") +) + func (p Procs) Len() int { return len(p) } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } @@ -43,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil { + if err != nil || errors.Unwrap(err) == ErrMountPoint { return Proc{}, err } return fs.Self() @@ -92,7 +98,7 @@ func (fs FS) Proc(pid int) (Proc, error) { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs.proc}, nil + return Proc{PID: pid, fs: fs}, nil } // AllProcs returns a list of all currently available processes. @@ -105,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -114,7 +120,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) + p = append(p, Proc{PID: int(pid), fs: fs}) } return p, nil @@ -206,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) + return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -237,6 +243,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { // FileDescriptorsLen returns the number of currently open file descriptors of // a process. 
func (p Proc) FileDescriptorsLen() (int, error) { + // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 + if p.fs.isReal { + stat, err := os.Stat(p.path("fd")) + if err != nil { + return 0, err + } + + size := stat.Size() + if size > 0 { + return int(size), nil + } + } + fds, err := p.fileDescriptors() if err != nil { return 0, err @@ -278,14 +297,14 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil } func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) + return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } // FileDescriptorsInfo retrieves information about all file descriptors of diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index ea83a75ffc..daeed7f571 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { fields := strings.SplitN(cgroupStr, ":", 3) if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) } cgroup := &Cgroup{ @@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { } cgroup.HierarchyID, err = strconv.Atoi(fields[0]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) } if fields[1] != "" { ssNames := strings.Split(fields[1], ",") diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go index 24d4dce9cf..5dd4938999 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { fields := strings.Fields(CgroupSummaryStr) // require at least 4 fields if len(fields) < 4 { - return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) } CgroupSummary := &CgroupSummary{ @@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { } CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) } CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) if err != nil { - return nil, fmt.Errorf("failed to parse Cgroup Num") + return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) } CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse Enabled") + return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) } return CgroupSummary, nil } diff --git 
a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 1bbdd4a8e9..fa761b3529 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } @@ -111,7 +117,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) { } return i, nil } - return nil, fmt.Errorf("invalid inode entry: %q", line) + return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) } // ProcFDInfos represents a list of ProcFDInfo structs. 
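
For context, a minimal sketch (not part of the patch) of how a downstream consumer might exercise the updated procfs surface shown above: the new `Ino` field on `ProcFDInfo` and the exported parse-error sentinel that callers can match with errors.Is instead of comparing message strings. The fd number "0" and the printed fields are illustrative only.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// Read fdinfo for stdin; "0" is just an illustrative descriptor.
	info, err := p.FDInfo("0")
	if err != nil {
		// Parse failures in this version wrap an exported sentinel,
		// so callers can branch on the error kind.
		if errors.Is(err, procfs.ErrFileParse) {
			log.Fatalf("malformed fdinfo: %v", err)
		}
		log.Fatal(err)
	}

	// Ino is populated from the new `ino:` line in fdinfo.
	fmt.Println(info.Pos, info.Flags, info.MntID, info.Ino)
}
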
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index 9df79c2379..86b4b45246 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -66,7 +66,7 @@ func parseInterrupts(r io.Reader) (Interrupts, error) { continue } if len(parts) < 2 { - return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) } intName := parts[0][:len(parts[0])-1] // remove trailing : diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 7a1388185a..c86d815d73 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) { //fields := limitsMatch.Split(s.Text(), limitsFields) fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } switch fields[1] { @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) + return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index f1bcbf32bb..7e75c286b5 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } @@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) { // parsePermissions parses a token and returns any that are set. 
func parsePermissions(s string) (*ProcMapPermissions, error) { if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") + return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) } perms := ProcMapPermissions{} @@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) { func parseProcMap(text string) (*ProcMap, error) { fields := strings.Fields(text) if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") + return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) } saddr, eaddr, err := parseAddresses(fields[0]) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 6a43bb2459..8e3ff4d794 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -195,8 +195,8 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index 391b4cbd11..c22666750f 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) { fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index a68fe15290..fe9dbb425f 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,14 +61,14 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } - return parsePSIStats(resource, bytes.NewReader(data)) + return parsePSIStats(bytes.NewReader(data)) } // parsePSIStats parses the specified file for pressure stall information. 
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { +func parsePSIStats(r io.Reader) (PSIStats, error) { psiStats := PSIStats{} scanner := bufio.NewScanner(r) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 0e97d99575..ad8785a407 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } vBytes := vKBytes * 1024 - s.addValue(k, v, vKBytes, vBytes) + s.addValue(k, vBytes) return nil } -func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { switch k { case "Rss": s.Rss += vUintBytes diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index 6c46b71884..b9d2cf642a 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index b278eb2c2d..923e55005b 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,7 +18,6 @@ import ( "fmt" "os" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -112,7 +111,7 @@ type ProcStat struct { // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 - proc fs.FS + proc FS } // NewStat returns the current status information of the process. @@ -139,7 +138,7 @@ func (p Proc) Stat() (ProcStat, error) { ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) + return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) } s.Comm = string(data[l+1 : r]) @@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() + stat, err := s.proc.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 3d8c06439a..46307f5721 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "sort" "strconv" "strings" @@ -22,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -31,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. 
VmPeak uint64 // nolint:revive @@ -76,6 +79,9 @@ type ProcStatus struct { UIDs [4]string // GIDs of the process (Real, effective, saved set, and filesystem GIDs) GIDs [4]string + + // CpusAllowedList: List of cpu cores processes are allowed to run on. + CpusAllowedList []uint64 } // NewStatus returns the current status information of the process. @@ -123,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -161,10 +169,53 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.VoluntaryCtxtSwitches = vUint case "nonvoluntary_ctxt_switches": s.NonVoluntaryCtxtSwitches = vUint + case "Cpus_allowed_list": + s.CpusAllowedList = calcCpusAllowedList(vString) } + } // TotalCtxtSwitches returns the total context switch. func (s ProcStatus) TotalCtxtSwitches() uint64 { return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches } + +func calcCpusAllowedList(cpuString string) []uint64 { + s := strings.Split(cpuString, ",") + + var g []uint64 + + for _, cpu := range s { + // parse cpu ranges, example: 1-3=[1,2,3] + if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 { + startCPU, _ := strconv.ParseUint(l[0], 10, 64) + endCPU, _ := strconv.ParseUint(l[1], 10, 64) + + for i := startCPU; i <= endCPU; i++ { + g = append(g, i) + } + } else if len(l) == 1 { + cpu, _ := strconv.ParseUint(l[0], 10, 64) + g = append(g, cpu) + } + + } + + sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + return g +} + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index d46533ebf4..12c5bf05b7 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index bc9aaf5c28..8611c90177 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) { l := slabSpace.ReplaceAllString(line, " ") s := strings.Split(l, " ") if len(s) != 16 { - return nil, fmt.Errorf("unable to parse: %q", line) + return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) } var err error i := &Slab{Name: s[0]} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 559129cbca..b8fad677dc 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { ) if !scanner.Scan() { - return Softirqs{}, 
fmt.Errorf("softirqs empty") + return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) } for scanner.Scan() { @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): 
%w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 586af48af9..34fc3ee21b 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) } cpuStat.User /= userHZ @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil @@ -187,6 +187,10 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { err error ) + // Increase default scanner buffer to handle very long `intr` lines. 
+ buf := make([]byte, 0, 8*1024) + scanner.Buffer(buf, 1024*1024) + for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -197,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -247,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index 15edc2212b..fa00f555db 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) { swapFields := strings.Fields(swapString) swapLength := len(swapFields) if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) } swap := &Swap{ @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, 
swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index f08bfc769d..df2215ece0 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} @@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) { if err != nil { continue } - t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) } return t, nil @@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) { if _, err := os.Stat(taskPath); err != nil { return Proc{}, err } - return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil } // Thread returns a process for a given TID of Proc. func (proc Proc) Thread(tid int) (Proc, error) { - tfs := fsi.FS(proc.path("task")) - if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} + if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { return Proc{}, err } return Proc{PID: tid, fs: tfs}, nil diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cdedcae996..51c49d89e8 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -86,7 +86,7 @@ func (fs FS) VM() (*VM, error) { return nil, err } if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) + return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) } files, err := os.ReadDir(path) diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index c745a4c04f..ce5fefa5b3 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 10f78fff43..6945123db4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -223,21 +223,21 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/pkg/errors 
v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.14.0 -## explicit; go 1.17 +# github.com/prometheus/client_golang v1.17.0 +## explicit; go 1.19 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.3.0 -## explicit; go 1.9 +# github.com/prometheus/client_model v0.5.0 +## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.42.0 +# github.com/prometheus/common v0.44.0 ## explicit; go 1.18 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.9.0 -## explicit; go 1.18 +# github.com/prometheus/procfs v0.12.0 +## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util
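
For context, a minimal sketch (assuming a Linux host with /proc mounted, not part of the patch) of the procfs v0.12.0 additions this bump pulls in: the new ProcStatus fields (CpusAllowedList, NSpids) and the FileDescriptorsLen fast path. The printed labels are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}

	// CpusAllowedList and NSpids are parsed from /proc/self/status.
	fmt.Println("cpus allowed:", status.CpusAllowedList)
	fmt.Println("pid namespaces:", status.NSpids)

	// On Linux 6.2+ this uses the size of /proc/self/fd directly;
	// older kernels fall back to listing the directory.
	n, err := p.FileDescriptorsLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("open fds:", n)
}

As the proc.go hunk shows, the fast path only applies when the size reported for the fd directory is non-zero, so the call degrades gracefully on older kernels.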