mirror of https://github.com/docker/cli.git
vendor: update prometheus dependencies
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
This commit is contained in:
parent
1dbcce2057
commit
074d1028b5
|
@ -76,18 +76,17 @@ require (
|
|||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/symlink v0.3.0 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/prometheus/client_golang v1.17.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/stretchr/testify v1.9.0 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
|
||||
|
|
18
vendor.sum
18
vendor.sum
|
@ -162,6 +162,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/magiconair/properties v1.5.3 h1:C8fxWnhYyME3n0klPOhVM7PtYUB3eV1W3DeFmN3j53Y=
|
||||
github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
|
@ -169,8 +171,6 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
|
|||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
|
||||
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
|
@ -202,6 +202,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
|
|||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
|
@ -227,18 +229,18 @@ github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go
|
|||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
|
||||
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
|
||||
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
|
|
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1 +0,0 @@
|
|||
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
|
|
@ -1 +0,0 @@
|
|||
cover.dat
|
|
@ -1,7 +0,0 @@
|
|||
all:
|
||||
|
||||
cover:
|
||||
go test -cover -v -coverprofile=cover.dat ./...
|
||||
go tool cover -func cover.dat
|
||||
|
||||
.PHONY: cover
|
|
@ -1,75 +0,0 @@
|
|||
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pbutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
var errInvalidVarint = errors.New("invalid varint32 encountered")
|
||||
|
||||
// ReadDelimited decodes a message from the provided length-delimited stream,
|
||||
// where the length is encoded as 32-bit varint prefix to the message body.
|
||||
// It returns the total number of bytes read and any applicable error. This is
|
||||
// roughly equivalent to the companion Java API's
|
||||
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
|
||||
// calls r.Read repeatedly as required until exactly one message including its
|
||||
// prefix is read and decoded (or an error has occurred). The function never
|
||||
// reads more bytes from the stream than required. The function never returns
|
||||
// an error if a message has been read and decoded correctly, even if the end
|
||||
// of the stream has been reached in doing so. In that case, any subsequent
|
||||
// calls return (0, io.EOF).
|
||||
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
|
||||
// Per AbstractParser#parsePartialDelimitedFrom with
|
||||
// CodedInputStream#readRawVarint32.
|
||||
var headerBuf [binary.MaxVarintLen32]byte
|
||||
var bytesRead, varIntBytes int
|
||||
var messageLength uint64
|
||||
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
|
||||
if bytesRead >= len(headerBuf) {
|
||||
return bytesRead, errInvalidVarint
|
||||
}
|
||||
// We have to read byte by byte here to avoid reading more bytes
|
||||
// than required. Each read byte is appended to what we have
|
||||
// read before.
|
||||
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
|
||||
if newBytesRead == 0 {
|
||||
if err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
// A Reader should not return (0, nil), but if it does,
|
||||
// it should be treated as no-op (according to the
|
||||
// Reader contract). So let's go on...
|
||||
continue
|
||||
}
|
||||
bytesRead += newBytesRead
|
||||
// Now present everything read so far to the varint decoder and
|
||||
// see if a varint can be decoded already.
|
||||
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
|
||||
}
|
||||
|
||||
messageBuf := make([]byte, messageLength)
|
||||
newBytesRead, err := io.ReadFull(r, messageBuf)
|
||||
bytesRead += newBytesRead
|
||||
if err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
|
||||
return bytesRead, proto.Unmarshal(messageBuf, m)
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pbutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// WriteDelimited encodes and dumps a message to the provided writer prefixed
|
||||
// with a 32-bit varint indicating the length of the encoded message, producing
|
||||
// a length-delimited record stream, which can be used to chain together
|
||||
// encoded messages of the same type together in a file. It returns the total
|
||||
// number of bytes written and any applicable error. This is roughly
|
||||
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
|
||||
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
|
||||
buffer, err := proto.Marshal(m)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var buf [binary.MaxVarintLen32]byte
|
||||
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
|
||||
|
||||
sync, err := w.Write(buf[:encodedLength])
|
||||
if err != nil {
|
||||
return sync, err
|
||||
}
|
||||
|
||||
n, err = w.Write(buffer)
|
||||
return n + sync, err
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,13 @@
|
|||
include $(GOROOT)/src/Make.inc
|
||||
|
||||
TARG=bitbucket.org/ww/goautoneg
|
||||
GOFILES=autoneg.go
|
||||
|
||||
include $(GOROOT)/src/Make.pkg
|
||||
|
||||
format:
|
||||
gofmt -w *.go
|
||||
|
||||
docs:
|
||||
gomake clean
|
||||
godoc ${TARG} > README.txt
|
|
@ -1,12 +1,12 @@
|
|||
/*
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
@ -36,6 +36,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package goautoneg
|
||||
|
||||
import (
|
||||
|
@ -51,16 +52,14 @@ type Accept struct {
|
|||
Params map[string]string
|
||||
}
|
||||
|
||||
// For internal use, so that we can use the sort interface
|
||||
type accept_slice []Accept
|
||||
// acceptSlice is defined to implement sort interface.
|
||||
type acceptSlice []Accept
|
||||
|
||||
func (accept accept_slice) Len() int {
|
||||
slice := []Accept(accept)
|
||||
func (slice acceptSlice) Len() int {
|
||||
return len(slice)
|
||||
}
|
||||
|
||||
func (accept accept_slice) Less(i, j int) bool {
|
||||
slice := []Accept(accept)
|
||||
func (slice acceptSlice) Less(i, j int) bool {
|
||||
ai, aj := slice[i], slice[j]
|
||||
if ai.Q > aj.Q {
|
||||
return true
|
||||
|
@ -74,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (accept accept_slice) Swap(i, j int) {
|
||||
slice := []Accept(accept)
|
||||
func (slice acceptSlice) Swap(i, j int) {
|
||||
slice[i], slice[j] = slice[j], slice[i]
|
||||
}
|
||||
|
||||
func stringTrimSpaceCutset(r rune) bool {
|
||||
return r == ' '
|
||||
}
|
||||
|
||||
func nextSplitElement(s, sep string) (item string, remaining string) {
|
||||
if index := strings.Index(s, sep); index != -1 {
|
||||
return s[:index], s[index+1:]
|
||||
}
|
||||
return s, ""
|
||||
}
|
||||
|
||||
// Parse an Accept Header string returning a sorted list
|
||||
// of clauses
|
||||
func ParseAccept(header string) (accept []Accept) {
|
||||
parts := strings.Split(header, ",")
|
||||
accept = make([]Accept, 0, len(parts))
|
||||
for _, part := range parts {
|
||||
part := strings.Trim(part, " ")
|
||||
func ParseAccept(header string) acceptSlice {
|
||||
partsCount := 0
|
||||
remaining := header
|
||||
for len(remaining) > 0 {
|
||||
partsCount++
|
||||
_, remaining = nextSplitElement(remaining, ",")
|
||||
}
|
||||
accept := make(acceptSlice, 0, partsCount)
|
||||
|
||||
a := Accept{}
|
||||
a.Params = make(map[string]string)
|
||||
a.Q = 1.0
|
||||
remaining = header
|
||||
var part string
|
||||
for len(remaining) > 0 {
|
||||
part, remaining = nextSplitElement(remaining, ",")
|
||||
part = strings.TrimFunc(part, stringTrimSpaceCutset)
|
||||
|
||||
mrp := strings.Split(part, ";")
|
||||
a := Accept{
|
||||
Q: 1.0,
|
||||
}
|
||||
|
||||
media_range := mrp[0]
|
||||
sp := strings.Split(media_range, "/")
|
||||
a.Type = strings.Trim(sp[0], " ")
|
||||
sp, remainingPart := nextSplitElement(part, ";")
|
||||
|
||||
sp0, spRemaining := nextSplitElement(sp, "/")
|
||||
a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)
|
||||
|
||||
switch {
|
||||
case len(sp) == 1 && a.Type == "*":
|
||||
case len(spRemaining) == 0:
|
||||
if a.Type == "*" {
|
||||
a.SubType = "*"
|
||||
case len(sp) == 2:
|
||||
a.SubType = strings.Trim(sp[1], " ")
|
||||
default:
|
||||
continue
|
||||
}
|
||||
|
||||
if len(mrp) == 1 {
|
||||
accept = append(accept, a)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, param := range mrp[1:] {
|
||||
sp := strings.SplitN(param, "=", 2)
|
||||
if len(sp) != 2 {
|
||||
continue
|
||||
}
|
||||
token := strings.Trim(sp[0], " ")
|
||||
if token == "q" {
|
||||
a.Q, _ = strconv.ParseFloat(sp[1], 32)
|
||||
} else {
|
||||
a.Params[token] = strings.Trim(sp[1], " ")
|
||||
continue
|
||||
}
|
||||
default:
|
||||
var sp1 string
|
||||
sp1, spRemaining = nextSplitElement(spRemaining, "/")
|
||||
if len(spRemaining) > 0 {
|
||||
continue
|
||||
}
|
||||
a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
|
||||
}
|
||||
|
||||
if len(remainingPart) == 0 {
|
||||
accept = append(accept, a)
|
||||
continue
|
||||
}
|
||||
|
||||
a.Params = make(map[string]string)
|
||||
for len(remainingPart) > 0 {
|
||||
sp, remainingPart = nextSplitElement(remainingPart, ";")
|
||||
sp0, spRemaining = nextSplitElement(sp, "=")
|
||||
if len(spRemaining) == 0 {
|
||||
continue
|
||||
}
|
||||
var sp1 string
|
||||
sp1, spRemaining = nextSplitElement(spRemaining, "=")
|
||||
if len(spRemaining) != 0 {
|
||||
continue
|
||||
}
|
||||
token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
|
||||
if token == "q" {
|
||||
a.Q, _ = strconv.ParseFloat(sp1, 32)
|
||||
} else {
|
||||
a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
|
||||
}
|
||||
}
|
||||
|
||||
accept = append(accept, a)
|
||||
}
|
||||
|
||||
slice := accept_slice(accept)
|
||||
sort.Sort(slice)
|
||||
|
||||
return
|
||||
sort.Sort(accept)
|
||||
return accept
|
||||
}
|
||||
|
||||
// Negotiate the most appropriate content_type given the accept header
|
|
@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format
|
|||
http://github.com/golang/protobuf/
|
||||
Copyright 2010 The Go Authors
|
||||
See source code for license details.
|
||||
|
||||
Support for streaming Protocol Buffer messages for the Go language (golang).
|
||||
https://github.com/matttproud/golang_protobuf_extensions
|
||||
Copyright 2013 Matt T. Proud
|
||||
Licensed under the Apache License, Version 2.0
|
||||
|
|
27
vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
generated
vendored
Normal file
27
vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
145
vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
generated
vendored
Normal file
145
vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
// Package header provides functions for parsing HTTP headers.
|
||||
package header
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
var octetTypes [256]octetType
|
||||
|
||||
type octetType byte
|
||||
|
||||
const (
|
||||
isToken octetType = 1 << iota
|
||||
isSpace
|
||||
)
|
||||
|
||||
func init() {
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
|
||||
if strings.ContainsRune(" \t\r\n", rune(c)) {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isToken
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
// AcceptSpec describes an Accept* header.
|
||||
type AcceptSpec struct {
|
||||
Value string
|
||||
Q float64
|
||||
}
|
||||
|
||||
// ParseAccept parses Accept* headers.
|
||||
func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
|
||||
loop:
|
||||
for _, s := range header[key] {
|
||||
for {
|
||||
var spec AcceptSpec
|
||||
spec.Value, s = expectTokenSlash(s)
|
||||
if spec.Value == "" {
|
||||
continue loop
|
||||
}
|
||||
spec.Q = 1.0
|
||||
s = skipSpace(s)
|
||||
if strings.HasPrefix(s, ";") {
|
||||
s = skipSpace(s[1:])
|
||||
if !strings.HasPrefix(s, "q=") {
|
||||
continue loop
|
||||
}
|
||||
spec.Q, s = expectQuality(s[2:])
|
||||
if spec.Q < 0.0 {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
specs = append(specs, spec)
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ",") {
|
||||
continue loop
|
||||
}
|
||||
s = skipSpace(s[1:])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectTokenSlash(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if (octetTypes[b]&isToken == 0) && b != '/' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectQuality(s string) (q float64, rest string) {
|
||||
switch {
|
||||
case len(s) == 0:
|
||||
return -1, ""
|
||||
case s[0] == '0':
|
||||
q = 0
|
||||
case s[0] == '1':
|
||||
q = 1
|
||||
default:
|
||||
return -1, ""
|
||||
}
|
||||
s = s[1:]
|
||||
if !strings.HasPrefix(s, ".") {
|
||||
return q, s
|
||||
}
|
||||
s = s[1:]
|
||||
i := 0
|
||||
n := 0
|
||||
d := 1
|
||||
for ; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if b < '0' || b > '9' {
|
||||
break
|
||||
}
|
||||
n = n*10 + int(b) - '0'
|
||||
d *= 10
|
||||
}
|
||||
return q + float64(n)/float64(d), s[i:]
|
||||
}
|
36
vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
generated
vendored
Normal file
36
vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
package httputil
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
|
||||
)
|
||||
|
||||
// NegotiateContentEncoding returns the best offered content encoding for the
|
||||
// request's Accept-Encoding header. If two offers match with equal weight and
|
||||
// then the offer earlier in the list is preferred. If no offers are
|
||||
// acceptable, then "" is returned.
|
||||
func NegotiateContentEncoding(r *http.Request, offers []string) string {
|
||||
bestOffer := "identity"
|
||||
bestQ := -1.0
|
||||
specs := header.ParseAccept(r.Header, "Accept-Encoding")
|
||||
for _, offer := range offers {
|
||||
for _, spec := range specs {
|
||||
if spec.Q > bestQ &&
|
||||
(spec.Value == "*" || spec.Value == offer) {
|
||||
bestQ = spec.Q
|
||||
bestOffer = offer
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestQ == 0 {
|
||||
bestOffer = ""
|
||||
}
|
||||
return bestOffer
|
||||
}
|
|
@ -22,13 +22,13 @@ import (
|
|||
// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
|
||||
// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
|
||||
// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
|
||||
// populated using runtime/metrics.
|
||||
// populated using runtime/metrics. Those are the defaults we can't alter.
|
||||
func goRuntimeMemStats() memStatsMetrics {
|
||||
return memStatsMetrics{
|
||||
{
|
||||
desc: NewDesc(
|
||||
memstatNamespace("alloc_bytes"),
|
||||
"Number of bytes allocated and still in use.",
|
||||
"Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
|
||||
|
@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("alloc_bytes_total"),
|
||||
"Total number of bytes allocated, even if freed.",
|
||||
"Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
|
||||
|
@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("sys_bytes"),
|
||||
"Number of bytes obtained from system.",
|
||||
"Number of bytes obtained from system. Equals to /memory/classes/total:byte.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("lookups_total"),
|
||||
"Total number of pointer lookups.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mallocs_total"),
|
||||
"Total number of mallocs.",
|
||||
// TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
|
||||
"Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
|
||||
|
@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("frees_total"),
|
||||
"Total number of frees.",
|
||||
"Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
|
||||
|
@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_alloc_bytes"),
|
||||
"Number of heap bytes allocated and still in use.",
|
||||
"Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
|
||||
|
@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_sys_bytes"),
|
||||
"Number of heap bytes obtained from system.",
|
||||
"Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
|
||||
|
@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_idle_bytes"),
|
||||
"Number of heap bytes waiting to be used.",
|
||||
"Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
|
||||
|
@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_inuse_bytes"),
|
||||
"Number of heap bytes that are in use.",
|
||||
"Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
|
||||
|
@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_released_bytes"),
|
||||
"Number of heap bytes released to OS.",
|
||||
"Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
|
||||
|
@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_objects"),
|
||||
"Number of allocated objects.",
|
||||
"Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
|
||||
|
@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("stack_inuse_bytes"),
|
||||
"Number of bytes in use by the stack allocator.",
|
||||
"Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
|
||||
|
@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("stack_sys_bytes"),
|
||||
"Number of bytes obtained from system for stack allocator.",
|
||||
"Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
|
||||
|
@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mspan_inuse_bytes"),
|
||||
"Number of bytes in use by mspan structures.",
|
||||
"Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
|
||||
|
@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mspan_sys_bytes"),
|
||||
"Number of bytes used for mspan structures obtained from system.",
|
||||
"Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
|
||||
|
@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mcache_inuse_bytes"),
|
||||
"Number of bytes in use by mcache structures.",
|
||||
"Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
|
||||
|
@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mcache_sys_bytes"),
|
||||
"Number of bytes used for mcache structures obtained from system.",
|
||||
"Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
|
||||
|
@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("buck_hash_sys_bytes"),
|
||||
"Number of bytes used by the profiling bucket hash table.",
|
||||
"Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
|
||||
|
@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("gc_sys_bytes"),
|
||||
"Number of bytes used for garbage collection system metadata.",
|
||||
"Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
|
||||
|
@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("other_sys_bytes"),
|
||||
"Number of bytes used for other system allocations.",
|
||||
"Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
|
||||
|
@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("next_gc_bytes"),
|
||||
"Number of heap bytes when next garbage collection will take place.",
|
||||
"Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
|
||||
|
@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector {
|
|||
nil, nil),
|
||||
gcDesc: NewDesc(
|
||||
"go_gc_duration_seconds",
|
||||
"A summary of the pause duration of garbage collection cycles.",
|
||||
"A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
|
||||
nil, nil),
|
||||
gcLastTimeDesc: NewDesc(
|
||||
"go_memstats_last_gc_time_seconds",
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"runtime/metrics"
|
||||
|
@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions {
|
|||
"/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
|
||||
},
|
||||
RuntimeMetricRules: []internal.GoCollectorRule{
|
||||
//{Matcher: regexp.MustCompile("")},
|
||||
// Recommended metrics we want by default from runtime/metrics.
|
||||
{Matcher: internal.GoCollectorDefaultRuntimeMetrics},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
|
|||
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
|
||||
continue
|
||||
}
|
||||
help := attachOriginalName(d.Description.Description, d.Name)
|
||||
|
||||
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
|
||||
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
|
||||
|
@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
|
|||
m = newBatchHistogram(
|
||||
NewDesc(
|
||||
BuildFQName(namespace, subsystem, name),
|
||||
d.Description.Description,
|
||||
help,
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
|
|||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: name,
|
||||
Help: d.Description.Description,
|
||||
Help: help,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
|
@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
|
|||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: name,
|
||||
Help: d.Description.Description,
|
||||
Help: help,
|
||||
})
|
||||
}
|
||||
metricSet = append(metricSet, m)
|
||||
|
@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
|
|||
}
|
||||
}
|
||||
|
||||
func attachOriginalName(desc, origName string) string {
|
||||
return fmt.Sprintf("%s Sourced from %s", desc, origName)
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||
c.base.Describe(ch)
|
||||
|
@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
|
|||
//
|
||||
// This should never happen because we always populate our metric
|
||||
// set from the runtime/metrics package.
|
||||
panic("unexpected unsupported metric")
|
||||
panic("unexpected bad kind metric")
|
||||
default:
|
||||
// Unsupported metric kind.
|
||||
//
|
||||
// This should never happen because we check for this during initialization
|
||||
// and flag and filter metrics whose kinds we don't understand.
|
||||
panic("unexpected unsupported metric kind")
|
||||
panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -440,7 +440,7 @@ type HistogramOpts struct {
|
|||
// constant (or any negative float value).
|
||||
NativeHistogramZeroThreshold float64
|
||||
|
||||
// The remaining fields define a strategy to limit the number of
|
||||
// The next three fields define a strategy to limit the number of
|
||||
// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
|
||||
// at zero, the number of buckets is not limited. (Note that this might
|
||||
// lead to unbounded memory consumption if the values observed by the
|
||||
|
@ -473,8 +473,27 @@ type HistogramOpts struct {
|
|||
NativeHistogramMinResetDuration time.Duration
|
||||
NativeHistogramMaxZeroThreshold float64
|
||||
|
||||
// NativeHistogramMaxExemplars limits the number of exemplars
|
||||
// that are kept in memory for each native histogram. If you leave it at
|
||||
// zero, a default value of 10 is used. If no exemplars should be kept specifically
|
||||
// for native histograms, set it to a negative value. (Scrapers can
|
||||
// still use the exemplars exposed for classic buckets, which are managed
|
||||
// independently.)
|
||||
NativeHistogramMaxExemplars int
|
||||
// NativeHistogramExemplarTTL is only checked once
|
||||
// NativeHistogramMaxExemplars is exceeded. In that case, the
|
||||
// oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
|
||||
// Otherwise, the older exemplar in the pair of exemplars that are closest
|
||||
// together (on an exponential scale) is removed.
|
||||
// If NativeHistogramExemplarTTL is left at its zero value, a default value of
|
||||
// 5m is used. To always delete the oldest exemplar, set it to a negative value.
|
||||
NativeHistogramExemplarTTL time.Duration
|
||||
|
||||
// now is for testing purposes, by default it's time.Now.
|
||||
now func() time.Time
|
||||
|
||||
// afterFunc is for testing purposes, by default it's time.AfterFunc.
|
||||
afterFunc func(time.Duration, func()) *time.Timer
|
||||
}
|
||||
|
||||
// HistogramVecOpts bundles the options to create a HistogramVec metric.
|
||||
|
@@ -526,6 +545,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
if opts.now == nil {
opts.now = time.Now
}
if opts.afterFunc == nil {
opts.afterFunc = time.AfterFunc
}

h := &histogram{
desc: desc,
@@ -536,6 +558,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
lastResetTime: opts.now(),
now: opts.now,
afterFunc: opts.afterFunc,
}
if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
h.upperBounds = DefBuckets
@@ -550,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -716,9 +740,17 @@ type histogram struct {
nativeHistogramMinResetDuration time.Duration
// lastResetTime is protected by mtx. It is also used as created timestamp.
lastResetTime time.Time
// resetScheduled is protected by mtx. It is true if a reset is
// scheduled for a later time (when nativeHistogramMinResetDuration has
// passed).
resetScheduled bool
nativeExemplars nativeExemplars

// now is for testing purposes, by default it's time.Now.
now func() time.Time

// afterFunc is for testing purposes, by default it's time.AfterFunc.
afterFunc func(time.Duration, func()) *time.Timer
}

func (h *histogram) Desc() *Desc {
@@ -729,6 +761,9 @@ func (h *histogram) Observe(v float64) {
h.observe(v, h.findBucket(v))
}

// ObserveWithExemplar should not be called in a high-frequency setting
// for a native histogram with configured exemplars. For this case,
// the implementation isn't lock-free and might suffer from lock contention.
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
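As a usage sketch (not part of the diff), exemplars are attached through ObserveWithExemplar; per the comment above this should stay out of hot paths when native-histogram exemplars are enabled. The metric name, exemplar label, and trace ID are made up:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "example_latency_seconds",
		Help:                        "Illustrative latency histogram.",
		NativeHistogramBucketFactor: 1.1,
	})
	// The concrete histogram also implements the ExemplarObserver interface.
	h.(prometheus.ExemplarObserver).ObserveWithExemplar(0.27, prometheus.Labels{
		"trace_id": "abc123", // hypothetical exemplar label
	})
}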
@@ -808,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error {
Length: proto.Uint32(0),
}}
}

if h.nativeExemplars.isEnabled() {
h.nativeExemplars.Lock()
his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
h.nativeExemplars.Unlock()
}

}
addAndResetCounts(hotCounts, coldCounts)
return nil
@@ -874,21 +916,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
return
}
// One of the other strategies will happen. To undo what they will do as
// soon as enough time has passed to satisfy
// h.nativeHistogramMinResetDuration, schedule a reset at the right time
// if we haven't done so already.
if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
h.resetScheduled = true
h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
}

if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
return
}
h.doubleBucketWidth(hotCounts, coldCounts)
}

// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
// has been passed. It returns true if the histogram has been reset. The caller
// must have locked h.mtx.
// maybeReset resets the whole histogram if at least
// h.nativeHistogramMinResetDuration has been passed. It returns true if the
// histogram has been reset. The caller must have locked h.mtx.
func (h *histogram) maybeReset(
hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
) bool {
// We are using the possibly mocked h.now() rather than
// time.Since(h.lastResetTime) to enable testing.
if h.nativeHistogramMinResetDuration == 0 ||
if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
h.resetScheduled || // Do not interefere if a reset is already scheduled.
h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
return false
}
@@ -906,6 +958,29 @@ func (h *histogram) maybeReset(
return true
}

// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
// called without having locked h.mtx.
func (h *histogram) reset() {
h.mtx.Lock()
defer h.mtx.Unlock()

n := atomic.LoadUint64(&h.countAndHotIdx)
hotIdx := n >> 63
coldIdx := (^n) >> 63
hot := h.counts[hotIdx]
cold := h.counts[coldIdx]
// Completely reset coldCounts.
h.resetCounts(cold)
// Make coldCounts the new hot counts while resetting countAndHotIdx.
n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
count := n & ((1 << 63) - 1)
waitForCooldown(count, hot)
// Finally, reset the formerly hot counts, too.
h.resetCounts(hot)
h.lastResetTime = h.now()
h.resetScheduled = false
}

// maybeWidenZeroBucket widens the zero bucket until it includes the existing
// buckets closest to the zero bucket (which could be two, if an equidistant
// negative and a positive bucket exists, but usually it's only one bucket to be
@@ -1045,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) {
deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}

// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
// updateExemplar replaces the exemplar for the provided classic bucket.
// With empty labels, it's a no-op. It panics if any of the labels is invalid.
// If histogram is native, the exemplar will be cached into nativeExemplars,
// which has a limit, and will remove one exemplar when limit is reached.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
@@ -1056,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
panic(err)
}
h.exemplars[bucket].Store(e)
doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
if doSparse {
h.nativeExemplars.addExemplar(e)
}
}

// HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -1290,6 +1371,48 @@ func MustNewConstHistogram(
return m
}

// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
func NewConstHistogramWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
ct time.Time,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constHistogram{
desc: desc,
count: count,
sum: sum,
buckets: buckets,
labelPairs: MakeLabelPairs(desc, labelValues),
createdTs: timestamppb.New(ct),
}, nil
}

// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
// NewConstHistogramWithCreatedTimestamp would have returned an error.
func MustNewConstHistogramWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
ct time.Time,
labelValues ...string,
) Metric {
m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
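A hedged usage sketch for the new const-histogram constructor with a created timestamp; the descriptor, counts, and bucket boundaries are invented for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("example_job_duration_seconds", "Illustrative const histogram.", nil, nil)
	m, err := prometheus.NewConstHistogramWithCreatedTimestamp(
		desc,
		42,                                 // observation count
		12.5,                               // sum of observations
		map[float64]uint64{0.5: 30, 1: 40}, // cumulative bucket counts
		time.Now().Add(-time.Hour),         // created timestamp
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Desc())
}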
type buckSort []*dto.Bucket

func (s buckSort) Len() int {
@@ -1529,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) {
atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
}

type nativeExemplars struct {
sync.Mutex

// Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0.
// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
ttl time.Duration

exemplars []*dto.Exemplar
}

func (n *nativeExemplars) isEnabled() bool {
return n.ttl != -1
}

func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
if ttl == 0 {
ttl = 5 * time.Minute
}

if maxCount == 0 {
maxCount = 10
}

if maxCount < 0 {
maxCount = 0
ttl = -1
}

return nativeExemplars{
ttl: ttl,
exemplars: make([]*dto.Exemplar, 0, maxCount),
}
}
func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
if !n.isEnabled() {
return
}

n.Lock()
defer n.Unlock()

// When the number of exemplars has not yet exceeded or
// is equal to cap(n.exemplars), then
// insert the new exemplar directly.
if len(n.exemplars) < cap(n.exemplars) {
var nIdx int
for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
if *e.Value < *n.exemplars[nIdx].Value {
break
}
}
n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
return
}

if len(n.exemplars) == 1 {
// When the number of exemplars is 1, then
// replace the existing exemplar with the new exemplar.
n.exemplars[0] = e
return
}
// From this point on, the number of exemplars is greater than 1.

// When the number of exemplars exceeds the limit, remove one exemplar.
var (
ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
otIdx = -1 // Index of the exemplar with the oldest timestamp.

md = -1.0 // Logarithm of the delta of the closest pair of exemplars.

// The insertion point of the new exemplar in the exemplars slice after insertion.
// This is calculated purely based on the order of the exemplars by value.
// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
nIdx = -1

// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
// It is calculated in 3 steps:
// 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
//    That is the following will be true (on log scale):
//    either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
//    the closest values to each other from all pairs.
//    For example, suppose the values are distributed like this:
//    |-----------x-------------x----------------x----x-----|
//                                                ^--rIdx as this is older.
//    Or like this:
//    |-----------x-------------x----------------x----x-----|
//                                                     ^--rIdx as this is older.
// 2. If there is an exemplar that expired, then we simple reset rIdx to that index.
// 3. We check if by inserting the new exemplar we would create a closer pair at
//    (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
//    keep the spread of exemplars by value; otherwise we keep rIdx as it is.
rIdx = -1
cLog float64 // Logarithm of the current exemplar.
pLog float64 // Logarithm of the previous exemplar.
)

for i, exemplar := range n.exemplars {
// Find the exemplar with the oldest timestamp.
if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
ot = exemplar.Timestamp.AsTime()
otIdx = i
}

// Find the index at which to insert new the exemplar.
if nIdx == -1 && *e.Value <= *exemplar.Value {
nIdx = i
}

// Find the two closest exemplars and pick the one the with older timestamp.
pLog = cLog
cLog = math.Log(exemplar.GetValue())
if i == 0 {
continue
}
diff := math.Abs(cLog - pLog)
if md == -1 || diff < md {
// The closest exemplar pair is at index: i-1, i.
// Choose the exemplar with the older timestamp for replacement.
md = diff
if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
rIdx = i
} else {
rIdx = i - 1
}
}

}

// If all existing exemplar are smaller than new exemplar,
// then the exemplar should be inserted at the end.
if nIdx == -1 {
nIdx = len(n.exemplars)
}
// Here, we have the following relationships:
// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))

if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
// If the oldest exemplar has expired, then replace it with the new exemplar.
rIdx = otIdx
} else {
// In the previous for loop, when calculating the closest pair of exemplars,
// we did not take into account the newly inserted exemplar.
// So we need to calculate with the newly inserted exemplar again.
elog := math.Log(e.GetValue())
if nIdx > 0 {
diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
if diff < md {
// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
// v--rIdx
// |-----------x-n-----------x----------------x----x-----|
//    nIdx-1--^ ^--new exemplar value
// Do not make the spread worse, replace nIdx-1 and not rIdx.
md = diff
rIdx = nIdx - 1
}
}
if nIdx < len(n.exemplars) {
diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
if diff < md {
// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
// v--rIdx
// |-----------x-----------n-x----------------x----x-----|
//   new exemplar value--^ ^--nIdx
// Do not make the spread worse, replace nIdx-1 and not rIdx.
rIdx = nIdx
}
}
}

// Adjust the slice according to rIdx and nIdx.
switch {
case rIdx == nIdx:
n.exemplars[nIdx] = e
case rIdx < nIdx:
n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
case rIdx > nIdx:
n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
}
}
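To see where the capped exemplar set ends up, a small sketch (with an invented metric name and values) observes a few values with exemplars and reads them back through Write, which, as shown above, copies nativeExemplars into the protobuf Histogram.exemplars field:

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "example_size_bytes",
		Help:                        "Illustrative native histogram.",
		NativeHistogramBucketFactor: 1.1,
		NativeHistogramMaxExemplars: 3, // keep at most three exemplars
	})
	eo := h.(prometheus.ExemplarObserver)
	for i, v := range []float64{1, 10, 100, 1000} {
		eo.ObserveWithExemplar(v, prometheus.Labels{"trace_id": fmt.Sprintf("t%d", i)})
	}

	var out dto.Metric
	if err := h.Write(&out); err != nil {
		panic(err)
	}
	// With a limit of 3, one of the four exemplars has been dropped.
	fmt.Println(len(out.GetHistogram().GetExemplars()))
}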
@@ -30,3 +30,5 @@ type GoCollectorOptions struct {
RuntimeMetricSumForHist map[string]string
RuntimeMetricRules []GoCollectorRule
}

var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)

@@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {

func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
// The call below makes vals escape, copy them to avoid that.
vals := append([]string(nil), vals...)
return fmt.Errorf(
"%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
@@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
)
for i, e := range exemplars {
ts := e.Timestamp
if ts == (time.Time{}) {
if ts.IsZero() {
ts = now
}
exs[i], err = newExemplar(e.Value, ts, e.Labels)

@@ -30,6 +30,7 @@ type processCollector struct {
vsize, maxVsize *Desc
rss *Desc
startTime *Desc
inBytes, outBytes *Desc
}

// ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
inBytes: NewDesc(
ns+"process_network_receive_bytes_total",
"Number of bytes received by the process over the network.",
nil, nil,
),
outBytes: NewDesc(
ns+"process_network_transmit_bytes_total",
"Number of bytes sent by the process over the network.",
nil, nil,
),
}

if opts.PidFn == nil {
@@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
ch <- c.inBytes
ch <- c.outBytes
}

// Collect returns the current state of all metrics of the collector.
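For context, a brief sketch of registering the process collector that now also exposes the two network byte counters where procfs netstat data is available; the registry and port are illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// Exposes process_network_receive_bytes_total and
	// process_network_transmit_bytes_total on supported platforms.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}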
18  vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go  (generated) (vendored)

@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows && !js
// +build !windows,!js
//go:build !windows && !js && !wasip1
// +build !windows,!js,!wasip1

package prometheus

@@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
} else {
c.reportError(ch, nil, err)
}

if netstat, err := p.Netstat(); err == nil {
var inOctets, outOctets float64
if netstat.IpExt.InOctets != nil {
inOctets = *netstat.IpExt.InOctets
}
if netstat.IpExt.OutOctets != nil {
outOctets = *netstat.IpExt.OutOctets
}
ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
} else {
c.reportError(ch, nil, err)
}
}
@@ -1,5 +1,4 @@
// Copyright 2013 Matt T. Proud
//
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -12,5 +11,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil
//go:build wasip1
// +build wasip1

package prometheus

func canCollectProcess() bool {
return false
}

func (*processCollector) processCollect(chan<- Metric) {
// noop on this platform
return
}
@@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}

// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
return r.ResponseWriter
}

type (
closeNotifierDelegator struct{ *responseWriterDelegator }
flusherDelegator struct{ *responseWriterDelegator }
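The Unwrap method above exists so that handlers wrapped by promhttp instrumentation can still reach the underlying ResponseWriter through http.ResponseController (Go 1.20+). A hedged sketch with a made-up handler and counter:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello\n"))
		// Thanks to Unwrap on the delegator, this reaches the real ResponseWriter.
		_ = http.NewResponseController(w).Flush()
	})
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "example_requests_total", Help: "Illustrative."},
		[]string{"code"},
	)
	handler := promhttp.InstrumentHandlerCounter(requests, inner)
	_ = http.ListenAndServe(":8080", handler)
}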
@@ -38,12 +38,13 @@ import (
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"

"github.com/klauspost/compress/zstd"
"github.com/prometheus/common/expfmt"

"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
)

@@ -54,6 +55,18 @@ const (
processStartTimeHeader = "Process-Start-Time-Unix"
)

// Compression represents the content encodings handlers support for the HTTP
// responses.
type Compression string

const (
Identity Compression = "identity"
Gzip Compression = "gzip"
Zstd Compression = "zstd"
)

var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}

var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
@@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
}

// Select compression formats to offer based on default or user choice.
var compressions []string
if !opts.DisableCompression {
offers := defaultCompressionFormats
if len(opts.OfferedCompressions) > 0 {
offers = opts.OfferedCompressions
}
for _, comp := range offers {
compressions = append(compressions, string(comp))
}
}

h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if !opts.ProcessStartTime.IsZero() {
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
@@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
} else {
contentType = expfmt.Negotiate(req.Header)
}
header := rsp.Header()
header.Set(contentTypeHeader, string(contentType))
rsp.Header().Set(contentTypeHeader, string(contentType))

w := io.Writer(rsp)
if !opts.DisableCompression && gzipAccepted(req.Header) {
header.Set(contentEncodingHeader, "gzip")
gz := gzipPool.Get().(*gzip.Writer)
defer gzipPool.Put(gz)

gz.Reset(w)
defer gz.Close()

w = gz
w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error getting writer", err)
}
w = io.Writer(rsp)
encodingHeader = string(Identity)
}

defer closeWriter()

// Set Content-Encoding only when data is compressed
if encodingHeader != string(Identity) {
rsp.Header().Set(contentEncodingHeader, encodingHeader)
}
enc := expfmt.NewEncoder(w, contentType)

// handleError handles the error according to opts.ErrorHandling
@@ -343,9 +370,19 @@ type HandlerOpts struct {
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
// DisableCompression disables the response encoding (compression) and
// encoding negotiation. If true, the handler will
// never compress the response, even if requested
// by the client and the OfferedCompressions field is set.
DisableCompression bool
// OfferedCompressions is a set of encodings (compressions) handler will
// try to offer when negotiating with the client. This defaults to identity, gzip
// and zstd.
// NOTE: If handler can't agree with the client on the encodings or
// unsupported or empty encodings are set in OfferedCompressions,
// handler always fallbacks to no compression (identity), for
// compatibility reasons. In such cases ErrorLog will be used if set.
OfferedCompressions []Compression
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
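A sketch of opting in to the new encoding negotiation; offering only identity and gzip here is an arbitrary choice for illustration:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Offer only identity and gzip; zstd is left out in this sketch.
		OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
	})
	http.Handle("/metrics", h)
	_ = http.ListenAndServe(":8080", nil)
}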
@@ -381,19 +418,6 @@ type HandlerOpts struct {
ProcessStartTime time.Time
}

// gzipAccepted returns whether the client will accept gzip-encoded content.
func gzipAccepted(header http.Header) bool {
a := header.Get(acceptEncodingHeader)
parts := strings.Split(a, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return true
}
}
return false
}

// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
@@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) {
http.StatusInternalServerError,
)
}

// negotiateEncodingWriter reads the Accept-Encoding header from a request and
// selects the right compression based on an allow-list of supported
// compressions. It returns a writer implementing the compression and an the
// correct value that the caller can set in the response header.
func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
if len(compressions) == 0 {
return rw, string(Identity), func() {}, nil
}

// TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
selected := httputil.NegotiateContentEncoding(r, compressions)

switch selected {
case "zstd":
// TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
if err != nil {
return nil, "", func() {}, err
}

z.Reset(rw)
return z, selected, func() { _ = z.Close() }, nil
case "gzip":
gz := gzipPool.Get().(*gzip.Writer)
gz.Reset(rw)
return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
case "identity":
// This means the content is not compressed.
return rw, selected, func() {}, nil
default:
// The content encoding was not implemented yet.
return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
}
}
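A client-side sketch of the negotiation above: requesting zstd via Accept-Encoding against a test server. Unsupported or unknown encodings fall back to identity, as described earlier; the expected header value is an assumption about the default offer list.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	srv := httptest.NewServer(promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
	req.Header.Set("Accept-Encoding", "zstd")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Header.Get("Content-Encoding")) // expected: "zstd"
}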
@@ -314,17 +314,18 @@ func (r *Registry) Register(c Collector) error {
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
} else {
continue
}

// ...then check the new descriptors already seen.
if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
if dimHash != desc.dimHash {
return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
} else {
continue
}
newDimHashesByName[desc.fqName] = desc.dimHash
}
}
}
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
r.uncheckedCollectors = append(r.uncheckedCollectors, c)
@@ -783,3 +783,45 @@ func MustNewConstSummary(
}
return m
}

// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
func NewConstSummaryWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
ct time.Time,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constSummary{
desc: desc,
count: count,
sum: sum,
quantiles: quantiles,
labelPairs: MakeLabelPairs(desc, labelValues),
createdTs: timestamppb.New(ct),
}, nil
}

// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
// NewConstSummaryWithCreatedTimestamp would have returned an error.
func MustNewConstSummaryWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
ct time.Time,
labelValues ...string,
) Metric {
m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
@@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues(
return metric
}

// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
@@ -483,6 +483,8 @@ type Histogram struct {
// histograms.
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
// Only used for native histograms. These exemplars MUST have a timestamp.
Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
}

func (x *Histogram) Reset() {
@@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 {
return nil
}

func (x *Histogram) GetExemplars() []*Exemplar {
if x != nil {
return x.Exemplars
}
return nil
}

// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
@@ -923,6 +932,7 @@ type MetricFamily struct {
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
}

func (x *MetricFamily) Reset() {
@@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric {
return nil
}

func (x *MetricFamily) GetUnit() string {
if x != nil && x.Unit != nil {
return *x.Unit
}
return ""
}

var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor

var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
@@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
[generated raw descriptor bytes omitted; per the message definitions above, the encoded descriptor now carries the new Histogram.exemplars (field 16) and MetricFamily.unit (field 5) fields]
@@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
[generated raw descriptor bytes omitted]
}

var (

@@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
19, // [19:19] is the sub-list for method output_type
19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
20, // [20:20] is the sub-list for method output_type
20, // [20:20] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
}

func init() { file_io_prometheus_client_metrics_proto_init() }

@@ -14,6 +14,7 @@
package expfmt

import (
"bufio"
"fmt"
"io"
"math"
@@ -21,8 +22,8 @@ import (
"net/http"

dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/encoding/protodelim"

"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/model"
)

@@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format {

mediatype, params, err := mime.ParseMediaType(ct)
if err != nil {
return FmtUnknown
return fmtUnknown
}

const textType = "text/plain"

@@ -52,42 +53,44 @@ func ResponseFormat(h http.Header) Format {
switch mediatype {
case ProtoType:
if p, ok := params["proto"]; ok && p != ProtoProtocol {
return FmtUnknown
return fmtUnknown
}
if e, ok := params["encoding"]; ok && e != "delimited" {
return FmtUnknown
return fmtUnknown
}
return FmtProtoDelim
return fmtProtoDelim

case textType:
if v, ok := params["version"]; ok && v != TextVersion {
return FmtUnknown
return fmtUnknown
}
return FmtText
return fmtText
}

return FmtUnknown
return fmtUnknown
}

// NewDecoder returns a new decoder based on the given input format.
// If the input format does not imply otherwise, a text format decoder is returned.
func NewDecoder(r io.Reader, format Format) Decoder {
switch format {
case FmtProtoDelim:
return &protoDecoder{r: r}
switch format.FormatType() {
case TypeProtoDelim:
return &protoDecoder{r: bufio.NewReader(r)}
}
return &textDecoder{r: r}
}

// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
r io.Reader
r protodelim.Reader
}

// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
_, err := pbutil.ReadDelimited(d.r, v)
if err != nil {
opts := protodelim.UnmarshalOptions{
MaxSize: -1,
}
if err := opts.UnmarshalFrom(d.r, v); err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {

@@ -18,10 +18,13 @@ import (
"io"
"net/http"

"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"google.golang.org/protobuf/encoding/protodelim"
"google.golang.org/protobuf/encoding/prototext"

"github.com/prometheus/common/model"

"github.com/munnerz/goautoneg"

dto "github.com/prometheus/client_model/go"
)

@@ -60,23 +63,32 @@ func (ec encoderCloser) Close() error {
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateOpenMetrics.
func Negotiate(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
default:
// If the escaping parameter is unknown, ignore it.
}
}
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
return fmtProtoDelim + escapingScheme
case "text":
return FmtProtoText
return fmtProtoText + escapingScheme
case "compact-text":
return FmtProtoCompact
return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
return fmtText + escapingScheme
}
}
return FmtText
return fmtText + escapingScheme
}

// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -84,29 +96,40 @@ func Negotiate(h http.Header) Format {
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
default:
// If the escaping parameter is unknown, ignore it.
}
}
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
return fmtProtoDelim + escapingScheme
case "text":
return FmtProtoText
return fmtProtoText + escapingScheme
case "compact-text":
return FmtProtoCompact
return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
return fmtText + escapingScheme
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
if ver == OpenMetricsVersion_1_0_0 {
return FmtOpenMetrics_1_0_0
}
return FmtOpenMetrics_0_0_1
switch ver {
case OpenMetricsVersion_1_0_0:
return fmtOpenMetrics_1_0_0 + escapingScheme
default:
return fmtOpenMetrics_0_0_1 + escapingScheme
}
}
return FmtText
}
return fmtText + escapingScheme
}

// NewEncoder returns a new encoder based on content type negotiation. All
@@ -115,44 +138,54 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
func NewEncoder(w io.Writer, format Format) Encoder {
switch format {
case FmtProtoDelim:
// In cases where the Format does not allow for UTF-8 names, the global
// NameEscapingScheme will be applied.
//
// NewEncoder can be called with additional options to customize the OpenMetrics text output.
// For example:
// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
//
// Extra options are ignored for all other formats.
func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
escapingScheme := format.ToEscapingScheme()

switch format.FormatType() {
case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
_, err := protodelim.MarshalTo(w, v)
return err
},
close: func() error { return nil },
}
case FmtProtoCompact:
case TypeProtoCompact:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
_, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
return err
},
close: func() error { return nil },
}
case FmtProtoText:
case TypeProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, prototext.Format(v))
_, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
return err
},
close: func() error { return nil },
}
case FmtText:
case TypeTextPlain:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
_, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
}
case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
case TypeOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
return err
},
close: func() error {
|
|
|
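Putting the negotiation and encoder pieces above together for OpenMetrics output, a hedged sketch (the metric family is made up; the option and Closer usage follows the code in this diff):

package main

import (
	"bytes"
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	var buf bytes.Buffer
	// NewFormat(TypeOpenMetrics) yields the latest OpenMetrics content type.
	enc := expfmt.NewEncoder(&buf, expfmt.NewFormat(expfmt.TypeOpenMetrics),
		expfmt.WithCreatedLines(), expfmt.WithUnit())

	mf := &dto.MetricFamily{
		Name: proto.String("demo_seconds_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("seconds"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(3)},
		}},
	}
	if err := enc.Encode(mf); err != nil {
		panic(err)
	}
	// The OpenMetrics encoder needs Close to emit the trailing "# EOF" line.
	if closer, ok := enc.(expfmt.Closer); ok {
		if err := closer.Close(); err != nil {
			panic(err)
		}
	}
	fmt.Print(buf.String())
}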
@ -14,30 +14,164 @@
|
|||
// Package expfmt contains tools for reading and writing Prometheus metrics.
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// Format specifies the HTTP content type of the different wire protocols.
|
||||
type Format string
|
||||
|
||||
// Constants to assemble the Content-Type values for the different wire protocols.
|
||||
// Constants to assemble the Content-Type values for the different wire
|
||||
// protocols. The Content-Type strings here are all for the legacy exposition
|
||||
// formats, where valid characters for metric names and label names are limited.
|
||||
// Support for arbitrary UTF-8 characters in those names is already partially
|
||||
// implemented in this module (see model.ValidationScheme), but to actually use
|
||||
// it on the wire, new content-type strings will have to be agreed upon and
|
||||
// added here.
|
||||
const (
|
||||
TextVersion = "0.0.4"
|
||||
ProtoType = `application/vnd.google.protobuf`
|
||||
ProtoProtocol = `io.prometheus.client.MetricFamily`
|
||||
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
OpenMetricsType = `application/openmetrics-text`
|
||||
OpenMetricsVersion_0_0_1 = "0.0.1"
|
||||
OpenMetricsVersion_1_0_0 = "1.0.0"
|
||||
|
||||
// The Content-Type values for the different wire protocols.
|
||||
FmtUnknown Format = `<unknown>`
|
||||
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
|
||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||
FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
|
||||
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
|
||||
// The Content-Type values for the different wire protocols. Note that these
|
||||
// values are now unexported. If code was relying on comparisons to these
|
||||
// constants, use FormatType() instead.
|
||||
fmtUnknown Format = `<unknown>`
|
||||
fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
|
||||
fmtProtoDelim Format = protoFmt + ` encoding=delimited`
|
||||
fmtProtoText Format = protoFmt + ` encoding=text`
|
||||
fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
|
||||
fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
|
||||
fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
|
||||
)
|
||||
|
||||
const (
|
||||
hdrContentType = "Content-Type"
|
||||
hdrAccept = "Accept"
|
||||
)
|
||||
|
||||
// FormatType is a Go enum representing the overall category for the given
|
||||
// Format. As the number of Format permutations increases, doing basic string
|
||||
// comparisons is not feasible, so this enum captures the most useful
|
||||
// high-level attribute of the Format string.
|
||||
type FormatType int
|
||||
|
||||
const (
|
||||
TypeUnknown FormatType = iota
|
||||
TypeProtoCompact
|
||||
TypeProtoDelim
|
||||
TypeProtoText
|
||||
TypeTextPlain
|
||||
TypeOpenMetrics
|
||||
)
|
||||
|
||||
// NewFormat generates a new Format from the type provided. Mostly used for
|
||||
// tests; most Formats should be generated as part of content negotiation in
|
||||
// encode.go. If a type has more than one version, the latest version will be
|
||||
// returned.
|
||||
func NewFormat(t FormatType) Format {
|
||||
switch t {
|
||||
case TypeProtoCompact:
|
||||
return fmtProtoCompact
|
||||
case TypeProtoDelim:
|
||||
return fmtProtoDelim
|
||||
case TypeProtoText:
|
||||
return fmtProtoText
|
||||
case TypeTextPlain:
|
||||
return fmtText
|
||||
case TypeOpenMetrics:
|
||||
return fmtOpenMetrics_1_0_0
|
||||
default:
|
||||
return fmtUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// NewOpenMetricsFormat generates a new OpenMetrics format matching the
|
||||
// specified version number.
|
||||
func NewOpenMetricsFormat(version string) (Format, error) {
|
||||
if version == OpenMetricsVersion_0_0_1 {
|
||||
return fmtOpenMetrics_0_0_1, nil
|
||||
}
|
||||
if version == OpenMetricsVersion_1_0_0 {
|
||||
return fmtOpenMetrics_1_0_0, nil
|
||||
}
|
||||
return fmtUnknown, fmt.Errorf("unknown open metrics version string")
|
||||
}
|
||||
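Since the raw Format constants are unexported after this change, comparisons are expected to go through FormatType; a small sketch using the constructors above:

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Request a specific OpenMetrics version explicitly.
	f, err := expfmt.NewOpenMetricsFormat(expfmt.OpenMetricsVersion_1_0_0)
	if err != nil {
		panic(err)
	}

	// Compare the high-level FormatType instead of the unexported constants.
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics) // true

	// NewFormat picks the latest version for a given type.
	delim := expfmt.NewFormat(expfmt.TypeProtoDelim)
	fmt.Println(delim.FormatType() == expfmt.TypeProtoDelim) // true
}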
|
||||
// FormatType deduces an overall FormatType for the given format.
|
||||
func (f Format) FormatType() FormatType {
|
||||
toks := strings.Split(string(f), ";")
|
||||
params := make(map[string]string)
|
||||
for i, t := range toks {
|
||||
if i == 0 {
|
||||
continue
|
||||
}
|
||||
args := strings.Split(t, "=")
|
||||
if len(args) != 2 {
|
||||
continue
|
||||
}
|
||||
params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
|
||||
}
|
||||
|
||||
switch strings.TrimSpace(toks[0]) {
|
||||
case ProtoType:
|
||||
if params["proto"] != ProtoProtocol {
|
||||
return TypeUnknown
|
||||
}
|
||||
switch params["encoding"] {
|
||||
case "delimited":
|
||||
return TypeProtoDelim
|
||||
case "text":
|
||||
return TypeProtoText
|
||||
case "compact-text":
|
||||
return TypeProtoCompact
|
||||
default:
|
||||
return TypeUnknown
|
||||
}
|
||||
case OpenMetricsType:
|
||||
if params["charset"] != "utf-8" {
|
||||
return TypeUnknown
|
||||
}
|
||||
return TypeOpenMetrics
|
||||
case "text/plain":
|
||||
v, ok := params["version"]
|
||||
if !ok {
|
||||
return TypeTextPlain
|
||||
}
|
||||
if v == TextVersion {
|
||||
return TypeTextPlain
|
||||
}
|
||||
return TypeUnknown
|
||||
default:
|
||||
return TypeUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
|
||||
// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
|
||||
// "escaping" term exists, that will be used. Otherwise, the global default will
|
||||
// be returned.
|
||||
func (format Format) ToEscapingScheme() model.EscapingScheme {
|
||||
for _, p := range strings.Split(string(format), ";") {
|
||||
toks := strings.Split(p, "=")
|
||||
if len(toks) != 2 {
|
||||
continue
|
||||
}
|
||||
key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
|
||||
if key == model.EscapingKey {
|
||||
scheme, err := model.ToEscapingScheme(value)
|
||||
if err != nil {
|
||||
return model.NameEscapingScheme
|
||||
}
|
||||
return scheme
|
||||
}
|
||||
}
|
||||
return model.NameEscapingScheme
|
||||
}
|
||||
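A short sketch of how a negotiated Content-Type carrying an escaping term is interpreted by the two methods above (the header value is invented):

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// A hypothetical Content-Type with an explicit escaping term.
	f := expfmt.Format("text/plain; version=0.0.4; charset=utf-8; escaping=underscores")

	fmt.Println(f.FormatType() == expfmt.TypeTextPlain)           // true
	fmt.Println(f.ToEscapingScheme() == model.UnderscoreEscaping) // true
}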
|
|
|
@ -22,11 +22,47 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
type encoderOption struct {
|
||||
withCreatedLines bool
|
||||
withUnit bool
|
||||
}
|
||||
|
||||
type EncoderOption func(*encoderOption)
|
||||
|
||||
// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
|
||||
// to include _created lines (See
|
||||
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
|
||||
// Created timestamps can improve the accuracy of series reset detection, but
|
||||
// come with a bandwidth cost.
|
||||
//
|
||||
// At the time of writing, created timestamp ingestion is still experimental in
|
||||
// Prometheus and needs to be enabled with the feature flag
|
||||
// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are
|
||||
// still possible. Therefore, it is recommended to use this feature with caution.
|
||||
func WithCreatedLines() EncoderOption {
|
||||
return func(t *encoderOption) {
|
||||
t.withCreatedLines = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithUnit is an EncoderOption enabling a set unit to be written to the output
|
||||
// and to be added to the metric name, if it's not there already, as a suffix.
|
||||
// Without opting in this way, the unit will not be added to the metric name and,
|
||||
// on top of that, the unit will not be passed onto the output, even if it
|
||||
// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit != nil.
|
||||
func WithUnit() EncoderOption {
|
||||
return func(t *encoderOption) {
|
||||
t.withUnit = true
|
||||
}
|
||||
}
|
||||
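A minimal sketch of calling MetricFamilyToOpenMetrics directly with both options; the metric family is invented, and the expected extra output lines (# UNIT and a *_created series) follow from the code further down in this diff:

package main

import (
	"os"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("demo_bytes_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("bytes"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(1024),
				CreatedTimestamp: timestamppb.New(time.Now().Add(-time.Hour)),
			},
		}},
	}
	// With both options set, the output should gain a "# UNIT demo_bytes bytes"
	// line and a demo_bytes_created sample next to demo_bytes_total.
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf,
		expfmt.WithUnit(), expfmt.WithCreatedLines()); err != nil {
		panic(err)
	}
}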
|
||||
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
|
||||
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
|
||||
// the number of bytes written and any error encountered. The output will have
|
||||
|
@ -35,6 +71,18 @@ import (
|
|||
// sanity checks. If the input contains duplicate metrics or invalid metric or
|
||||
// label names, the conversion will result in invalid text format output.
|
||||
//
|
||||
// If metric names conform to the legacy validation pattern, they will be placed
|
||||
// outside the brackets in the traditional way, like `foo{}`. If the metric name
|
||||
// fails the legacy validation check, it will be placed quoted inside the
|
||||
// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and
|
||||
// no error will be thrown in this case.
|
||||
//
|
||||
// Similar to metric names, if label names conform to the legacy validation
|
||||
// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
|
||||
// name fails the legacy validation check, it will be quoted:
|
||||
// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
|
||||
// no error will be thrown in this case.
|
||||
//
|
||||
// This function fulfills the type 'expfmt.encoder'.
|
||||
//
|
||||
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
|
||||
|
@ -47,20 +95,34 @@ import (
|
|||
// Prometheus to OpenMetrics or vice versa:
|
||||
//
|
||||
// - Counters are expected to have the `_total` suffix in their metric name. In
|
||||
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
|
||||
// line. A counter with a missing `_total` suffix is not an error. However,
|
||||
// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
|
||||
// lines. A counter with a missing `_total` suffix is not an error. However,
|
||||
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
|
||||
// output.
|
||||
//
|
||||
// - No support for the following (optional) features: `# UNIT` line, `_created`
|
||||
// line, info type, stateset type, gaugehistogram type.
|
||||
// - According to the OM specs, the `# UNIT` line is optional, but if populated,
|
||||
// the unit has to be present in the metric name as its suffix:
|
||||
// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
|
||||
// However, to accommodate scenarios where such a change to the metric name is
|
||||
// not desirable, users must explicitly opt in (see the description of the
|
||||
// WithUnit function above) if they want the unit to be included both in the
|
||||
// output and in the metric name as a suffix; without opting in, neither
|
||||
// happens.
|
||||
//
|
||||
// - No support for the following (optional) features: info type,
|
||||
// stateset type, gaugehistogram type.
|
||||
//
|
||||
// - The size of exemplar labels is not checked (i.e. it's possible to create
|
||||
// exemplars that are larger than allowed by the OpenMetrics specification).
|
||||
//
|
||||
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
|
||||
// with a `NaN` value.)
|
||||
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
|
||||
toOM := encoderOption{}
|
||||
for _, option := range options {
|
||||
option(&toOM)
|
||||
}
|
||||
|
||||
name := in.GetName()
|
||||
if name == "" {
|
||||
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||
|
@ -85,10 +147,13 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
var (
|
||||
n int
|
||||
metricType = in.GetType()
|
||||
shortName = name
|
||||
compliantName = name
|
||||
)
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
|
||||
shortName = name[:len(name)-6]
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
|
||||
compliantName = name[:len(name)-6]
|
||||
}
|
||||
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
|
||||
compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
|
||||
}
|
||||
|
||||
// Comments, first HELP, then TYPE.
|
||||
|
@ -98,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(shortName)
|
||||
n, err = writeName(w, compliantName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -124,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(shortName)
|
||||
n, err = writeName(w, compliantName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -151,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
if toOM.withUnit && in.Unit != nil {
|
||||
n, err = w.WriteString("# UNIT ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeName(w, compliantName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeEscapedString(w, *in.Unit, true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var createdTsBytesWritten int
|
||||
|
||||
// Finally the samples, one line for each.
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
|
||||
compliantName = compliantName + "_total"
|
||||
}
|
||||
for _, metric := range in.Metric {
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
if metric.Counter == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected counter in metric %s %s", name, metric,
|
||||
"expected counter in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
// Note that we have ensured above that either the name
|
||||
// ends on `_total` or that the rendered type is
|
||||
// `unknown`. Therefore, no `_total` must be added here.
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
w, compliantName, "", metric, "", 0,
|
||||
metric.Counter.GetValue(), 0, false,
|
||||
metric.Counter.Exemplar,
|
||||
)
|
||||
if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
|
||||
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
|
||||
n += createdTsBytesWritten
|
||||
}
|
||||
case dto.MetricType_GAUGE:
|
||||
if metric.Gauge == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected gauge in metric %s %s", name, metric,
|
||||
"expected gauge in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
w, compliantName, "", metric, "", 0,
|
||||
metric.Gauge.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_UNTYPED:
|
||||
if metric.Untyped == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected untyped in metric %s %s", name, metric,
|
||||
"expected untyped in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
w, compliantName, "", metric, "", 0,
|
||||
metric.Untyped.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_SUMMARY:
|
||||
if metric.Summary == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected summary in metric %s %s", name, metric,
|
||||
"expected summary in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
for _, q := range metric.Summary.Quantile {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric,
|
||||
w, compliantName, "", metric,
|
||||
model.QuantileLabel, q.GetQuantile(),
|
||||
q.GetValue(), 0, false,
|
||||
nil,
|
||||
|
@ -210,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
w, compliantName, "_sum", metric, "", 0,
|
||||
metric.Summary.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
|
@ -219,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
w, compliantName, "_count", metric, "", 0,
|
||||
0, metric.Summary.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
|
||||
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
|
||||
n += createdTsBytesWritten
|
||||
}
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if metric.Histogram == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected histogram in metric %s %s", name, metric,
|
||||
"expected histogram in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
infSeen := false
|
||||
for _, b := range metric.Histogram.Bucket {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
w, compliantName, "_bucket", metric,
|
||||
model.BucketLabel, b.GetUpperBound(),
|
||||
0, b.GetCumulativeCount(), true,
|
||||
b.Exemplar,
|
||||
|
@ -247,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
}
|
||||
if !infSeen {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
w, compliantName, "_bucket", metric,
|
||||
model.BucketLabel, math.Inf(+1),
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
|
@ -258,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
w, compliantName, "_sum", metric, "", 0,
|
||||
metric.Histogram.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
|
@ -267,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
w, compliantName, "_count", metric, "", 0,
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
|
||||
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
|
||||
n += createdTsBytesWritten
|
||||
}
|
||||
default:
|
||||
return written, fmt.Errorf(
|
||||
"unexpected type in metric %s %s", name, metric,
|
||||
"unexpected type in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
written += n
|
||||
|
@ -303,21 +410,9 @@ func writeOpenMetricsSample(
|
|||
floatValue float64, intValue uint64, useIntValue bool,
|
||||
exemplar *dto.Exemplar,
|
||||
) (int, error) {
|
||||
var written int
|
||||
n, err := w.WriteString(name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if suffix != "" {
|
||||
n, err = w.WriteString(suffix)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsLabelPairs(
|
||||
w, metric.Label, additionalLabelName, additionalLabelValue,
|
||||
written := 0
|
||||
n, err := writeOpenMetricsNameAndLabelPairs(
|
||||
w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
|
@ -350,7 +445,7 @@ func writeOpenMetricsSample(
|
|||
return written, err
|
||||
}
|
||||
}
|
||||
if exemplar != nil {
|
||||
if exemplar != nil && len(exemplar.Label) > 0 {
|
||||
n, err = writeExemplar(w, exemplar)
|
||||
written += n
|
||||
if err != nil {
|
||||
|
@ -365,27 +460,58 @@ func writeOpenMetricsSample(
|
|||
return written, nil
|
||||
}
|
||||
|
||||
// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
|
||||
// in OpenMetrics style.
|
||||
func writeOpenMetricsLabelPairs(
|
||||
// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but
|
||||
// formats the float in OpenMetrics style.
|
||||
func writeOpenMetricsNameAndLabelPairs(
|
||||
w enhancedWriter,
|
||||
name string,
|
||||
in []*dto.LabelPair,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
) (int, error) {
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
var (
|
||||
written int
|
||||
separator byte = '{'
|
||||
metricInsideBraces = false
|
||||
)
|
||||
|
||||
if name != "" {
|
||||
// If the name does not pass the legacy validity check, we must put the
|
||||
// metric name inside the braces, quoted.
|
||||
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
|
||||
metricInsideBraces = true
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
separator = ','
|
||||
}
|
||||
|
||||
n, err := writeName(w, name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
if metricInsideBraces {
|
||||
err := w.WriteByte('}')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
for _, lp := range in {
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(lp.GetName())
|
||||
n, err := writeName(w, lp.GetName())
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
|
@ -442,6 +568,49 @@ func writeOpenMetricsLabelPairs(
|
|||
return written, nil
|
||||
}
|
||||
|
||||
// writeOpenMetricsCreated writes the created timestamp for a single time series
|
||||
// following OpenMetrics text format to w, given the metric name, the metric proto
|
||||
// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
|
||||
// an additional label name with a float64 value (use empty string as label name if
|
||||
// not required), and the created timestamp itself.
|
||||
// The function returns the number of bytes written and any error encountered.
|
||||
func writeOpenMetricsCreated(w enhancedWriter,
|
||||
name, suffixToTrim string, metric *dto.Metric,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
createdTimestamp *timestamppb.Timestamp,
|
||||
) (int, error) {
|
||||
written := 0
|
||||
n, err := writeOpenMetricsNameAndLabelPairs(
|
||||
w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
|
||||
// TODO(beorn7): Format this directly from components of ts to
|
||||
// avoid overflow/underflow and precision issues of the float
|
||||
// conversion.
|
||||
n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
|
||||
// function returns the number of bytes written and any error encountered.
|
||||
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
||||
|
@ -451,7 +620,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
|||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
|
||||
n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
|
|
|
@ -62,6 +62,18 @@ var (
|
|||
// contains duplicate metrics or invalid metric or label names, the conversion
|
||||
// will result in invalid text format output.
|
||||
//
|
||||
// If metric names conform to the legacy validation pattern, they will be placed
|
||||
// outside the brackets in the traditional way, like `foo{}`. If the metric name
|
||||
// fails the legacy validation check, it will be placed quoted inside the
|
||||
// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and
|
||||
// no error will be thrown in this case.
|
||||
//
|
||||
// Similar to metric names, if label names conform to the legacy validation
|
||||
// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
|
||||
// name fails the legacy validation check, it will be quoted:
|
||||
// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
|
||||
// no error will be thrown in this case.
|
||||
//
|
||||
// This method fulfills the type 'prometheus.encoder'.
|
||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||
// Fail-fast checks.
|
||||
|
@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(name)
|
||||
n, err = writeName(w, name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(name)
|
||||
n, err = writeName(w, name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -280,21 +292,9 @@ func writeSample(
|
|||
additionalLabelName string, additionalLabelValue float64,
|
||||
value float64,
|
||||
) (int, error) {
|
||||
var written int
|
||||
n, err := w.WriteString(name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if suffix != "" {
|
||||
n, err = w.WriteString(suffix)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = writeLabelPairs(
|
||||
w, metric.Label, additionalLabelName, additionalLabelValue,
|
||||
written := 0
|
||||
n, err := writeNameAndLabelPairs(
|
||||
w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
|
@ -330,32 +330,64 @@ func writeSample(
|
|||
return written, nil
|
||||
}
|
||||
|
||||
// writeLabelPairs converts a slice of LabelPair proto messages plus the
|
||||
// explicitly given additional label pair into text formatted as required by the
|
||||
// text format and writes it to 'w'. An empty slice in combination with an empty
|
||||
// string 'additionalLabelName' results in nothing being written. Otherwise, the
|
||||
// label pairs are written, escaped as required by the text format, and enclosed
|
||||
// in '{...}'. The function returns the number of bytes written and any error
|
||||
// encountered.
|
||||
func writeLabelPairs(
|
||||
// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
|
||||
// explicitly given metric name and additional label pair into text formatted as
|
||||
// required by the text format and writes it to 'w'. An empty slice in
|
||||
// combination with an empty string 'additionalLabelName' results in nothing
|
||||
// being written. Otherwise, the label pairs are written, escaped as required by
|
||||
// the text format, and enclosed in '{...}'. The function returns the number of
|
||||
// bytes written and any error encountered. If the metric name is not
|
||||
// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
|
||||
// label names will also be quoted.
|
||||
func writeNameAndLabelPairs(
|
||||
w enhancedWriter,
|
||||
name string,
|
||||
in []*dto.LabelPair,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
) (int, error) {
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
var (
|
||||
written int
|
||||
separator byte = '{'
|
||||
metricInsideBraces = false
|
||||
)
|
||||
|
||||
if name != "" {
|
||||
// If the name does not pass the legacy validity check, we must put the
|
||||
// metric name inside the braces.
|
||||
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
|
||||
metricInsideBraces = true
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
separator = ','
|
||||
}
|
||||
n, err := writeName(w, name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
if metricInsideBraces {
|
||||
err := w.WriteByte('}')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
for _, lp := range in {
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(lp.GetName())
|
||||
n, err := writeName(w, lp.GetName())
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
|
@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
|
|||
numBufPool.Put(bp)
|
||||
return written, err
|
||||
}
|
||||
|
||||
// writeName writes a string as-is if it complies with the legacy naming
|
||||
// scheme, or escapes it in double quotes if not.
|
||||
func writeName(w enhancedWriter, name string) (int, error) {
|
||||
if model.IsValidLegacyMetricName(model.LabelValue(name)) {
|
||||
return w.WriteString(name)
|
||||
}
|
||||
var written int
|
||||
var err error
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
var n int
|
||||
n, err = writeEscapedString(w, name, true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
return written, err
|
||||
}
|
||||
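To illustrate the quoting behaviour of writeName and writeNameAndLabelPairs, a sketch with an invented, legacy-invalid metric name; the expected rendering is an assumption based on the code above:

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		// A dotted name fails the legacy validity check, so the sample line
		// should come out quoted inside braces: {"http.requests.count"} 42
		Name: proto.String("http.requests.count"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Gauge: &dto.Gauge{Value: proto.Float64(42)},
		}},
	}
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}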
|
|
|
@ -16,6 +16,7 @@ package expfmt
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
|
@ -24,8 +25,9 @@ import (
|
|||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// A stateFn is a function that represents a state in a state machine. By
|
||||
|
@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
|
|||
// stream. Turn this error into something nicer and more
|
||||
// meaningful. (io.EOF is often used as a signal for the legitimate end
|
||||
// of an input stream.)
|
||||
if p.err == io.EOF {
|
||||
if p.err != nil && errors.Is(p.err, io.EOF) {
|
||||
p.parseError("unexpected end of input stream")
|
||||
}
|
||||
return p.metricFamiliesByName, p.err
|
||||
|
@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn {
|
|||
// which is not an error but the signal that we are done.
|
||||
// Any other error that happens to align with the start of
|
||||
// a line is still an error.
|
||||
if p.err == io.EOF {
|
||||
if errors.Is(p.err, io.EOF) {
|
||||
p.err = nil
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool {
|
|||
|
||||
// Status returns the status of the alert.
|
||||
func (a *Alert) Status() AlertStatus {
|
||||
if a.Resolved() {
|
||||
return a.StatusAt(time.Now())
|
||||
}
|
||||
|
||||
// StatusAt returns the status of the alert at the given timestamp.
|
||||
func (a *Alert) StatusAt(ts time.Time) AlertStatus {
|
||||
if a.ResolvedAt(ts) {
|
||||
return AlertResolved
|
||||
}
|
||||
return AlertFiring
|
||||
|
@ -90,13 +95,13 @@ func (a *Alert) Validate() error {
|
|||
return fmt.Errorf("start time must be before end time")
|
||||
}
|
||||
if err := a.Labels.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid label set: %s", err)
|
||||
return fmt.Errorf("invalid label set: %w", err)
|
||||
}
|
||||
if len(a.Labels) == 0 {
|
||||
return fmt.Errorf("at least one label pair required")
|
||||
}
|
||||
if err := a.Annotations.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid annotations: %s", err)
|
||||
return fmt.Errorf("invalid annotations: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// HasFiringAt returns true iff one of the alerts is not resolved
|
||||
// at the time ts.
|
||||
func (as Alerts) HasFiringAt(ts time.Time) bool {
|
||||
for _, a := range as {
|
||||
if !a.ResolvedAt(ts) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Status returns StatusFiring iff at least one of the alerts is firing.
|
||||
func (as Alerts) Status() AlertStatus {
|
||||
if as.HasFiring() {
|
||||
|
@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus {
|
|||
}
|
||||
return AlertResolved
|
||||
}
|
||||
|
||||
// StatusAt returns StatusFiring iff at least one of the alerts is firing
|
||||
// at the time ts.
|
||||
func (as Alerts) StatusAt(ts time.Time) AlertStatus {
|
||||
if as.HasFiringAt(ts) {
|
||||
return AlertFiring
|
||||
}
|
||||
return AlertResolved
|
||||
}
|
||||
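A brief sketch of the timestamp-aware helpers added here; the alert data is made up:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	alerts := model.Alerts{
		{
			Labels:   model.LabelSet{"alertname": "HighLatency"},
			StartsAt: now.Add(-30 * time.Minute),
			EndsAt:   now.Add(-10 * time.Minute), // resolved ten minutes ago
		},
	}

	// Evaluated now the alert is resolved, but twenty minutes ago it was
	// still firing.
	fmt.Println(alerts.StatusAt(now))                        // resolved
	fmt.Println(alerts.StatusAt(now.Add(-20 * time.Minute))) // firing
}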
|
|
|
@ -97,18 +97,26 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
|||
// therewith.
|
||||
type LabelName string
|
||||
|
||||
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
|
||||
// method, however, does not use LabelNameRE for the check but a much faster
|
||||
// hardcoded implementation.
|
||||
// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
|
||||
// names, and iff it's valid UTF-8 if NameValidationScheme is set to
|
||||
// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
|
||||
// check but a much faster hardcoded implementation.
|
||||
func (ln LabelName) IsValid() bool {
|
||||
if len(ln) == 0 {
|
||||
return false
|
||||
}
|
||||
switch NameValidationScheme {
|
||||
case LegacyValidation:
|
||||
for i, b := range ln {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
case UTF8Validation:
|
||||
return utf8.ValidString(string(ln))
|
||||
default:
|
||||
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
|
||||
}
|
||||
return true
|
||||
}
|
||||
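A sketch of opting in to UTF-8 name validation as the comments above suggest, set once in an init function; the example names are invented:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func init() {
	// Opt in to UTF-8 names once, before any goroutines call the validators.
	model.NameValidationScheme = model.UTF8Validation
}

func main() {
	// Both of these would fail the legacy check but pass UTF8Validation.
	fmt.Println(model.LabelName("label.with.dots").IsValid())  // true
	fmt.Println(model.IsValidMetricName("metric with spaces")) // true
}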
|
||||
|
@ -164,7 +172,7 @@ func (l LabelNames) String() string {
|
|||
// A LabelValue is an associated value for a LabelName.
|
||||
type LabelValue string
|
||||
|
||||
// IsValid returns true iff the string is a valid UTF8.
|
||||
// IsValid returns true iff the string is a valid UTF-8.
|
||||
func (lv LabelValue) IsValid() bool {
|
||||
return utf8.ValidString(string(lv))
|
||||
}
|
||||
|
|
|
@ -17,7 +17,6 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
|
||||
|
@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet {
|
|||
return result
|
||||
}
|
||||
|
||||
func (l LabelSet) String() string {
|
||||
lstrs := make([]string, 0, len(l))
|
||||
for l, v := range l {
|
||||
lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
|
||||
}
|
||||
|
||||
sort.Strings(lstrs)
|
||||
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
|
||||
}
|
||||
|
||||
// Fingerprint returns the LabelSet's fingerprint.
|
||||
func (ls LabelSet) Fingerprint() Fingerprint {
|
||||
return labelSetToFingerprint(ls)
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.21
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"slices"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
|
||||
func (l LabelSet) String() string {
|
||||
var lna [32]string // On stack to avoid memory allocation for sorting names.
|
||||
labelNames := lna[:0]
|
||||
for name := range l {
|
||||
labelNames = append(labelNames, string(name))
|
||||
}
|
||||
slices.Sort(labelNames)
|
||||
var bytea [1024]byte // On stack to avoid memory allocation while building the output.
|
||||
b := bytes.NewBuffer(bytea[:0])
|
||||
b.WriteByte('{')
|
||||
for i, name := range labelNames {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
b.WriteString(name)
|
||||
b.WriteByte('=')
|
||||
b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.String()
|
||||
}
|
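A tiny usage sketch of the new String implementation (values invented); the output ordering follows the doc comment above:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{"more": "less", "foo": "bar"}
	// Names are sorted alphabetically, so this should print:
	// {foo="bar", more="less"}
	fmt.Println(ls)
}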
39
vendor/github.com/prometheus/common/model/labelset_string_go120.go
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !go1.21
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// String was optimized using functions not available for go 1.20
|
||||
// or lower. We keep the old implementation for compatibility with client_golang.
|
||||
// Once client_golang drops support for Go 1.20 (scheduled for August 2024), this
|
||||
// file can be removed.
|
||||
func (l LabelSet) String() string {
|
||||
labelNames := make([]string, 0, len(l))
|
||||
for name := range l {
|
||||
labelNames = append(labelNames, string(name))
|
||||
}
|
||||
sort.Strings(labelNames)
|
||||
lstrs := make([]string, 0, len(l))
|
||||
for _, name := range labelNames {
|
||||
lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
|
||||
}
|
||||
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
// MetricType represents metric type values.
|
||||
type MetricType string
|
||||
|
||||
const (
|
||||
MetricTypeCounter = MetricType("counter")
|
||||
MetricTypeGauge = MetricType("gauge")
|
||||
MetricTypeHistogram = MetricType("histogram")
|
||||
MetricTypeGaugeHistogram = MetricType("gaugehistogram")
|
||||
MetricTypeSummary = MetricType("summary")
|
||||
MetricTypeInfo = MetricType("info")
|
||||
MetricTypeStateset = MetricType("stateset")
|
||||
MetricTypeUnknown = MetricType("unknown")
|
||||
)
|
|
@ -18,14 +18,83 @@ import (
|
|||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
// NameValidationScheme determines the method of name validation to be used by
|
||||
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
|
||||
// in isolation from other components that don't support UTF-8 may result in
|
||||
// bugs or other undefined behavior. This value is intended to be set by
|
||||
// UTF-8-aware binaries as part of their startup. To avoid the need for locking,
|
||||
// this value should be set once, ideally in an init(), before multiple
|
||||
// goroutines are started.
|
||||
NameValidationScheme = LegacyValidation
|
||||
|
||||
// NameEscapingScheme defines the default way that names will be
|
||||
// escaped when presented to systems that do not support UTF-8 names. If the
|
||||
// Content-Type "escaping" term is specified, that will override this value.
|
||||
NameEscapingScheme = ValueEncodingEscaping
|
||||
)
|
||||
|
||||
// ValidationScheme is a Go enum for determining how metric and label names will
|
||||
// be validated by this library.
|
||||
type ValidationScheme int
|
||||
|
||||
const (
|
||||
// LegacyValidation is a setting that requires that metric and label names
|
||||
// conform to the original Prometheus character requirements described by
|
||||
// MetricNameRE and LabelNameRE.
|
||||
LegacyValidation ValidationScheme = iota
|
||||
|
||||
// UTF8Validation only requires that metric and label names be valid UTF-8
|
||||
// strings.
|
||||
UTF8Validation
|
||||
)
|
||||
|
||||
type EscapingScheme int
|
||||
|
||||
const (
|
||||
// NoEscaping indicates that a name will not be escaped. Unescaped names that
|
||||
// do not conform to the legacy validity check will use a new exposition
|
||||
// format syntax that will be officially standardized in future versions.
|
||||
NoEscaping EscapingScheme = iota
|
||||
|
||||
// UnderscoreEscaping replaces all legacy-invalid characters with underscores.
|
||||
UnderscoreEscaping
|
||||
|
||||
// DotsEscaping is similar to UnderscoreEscaping, except that dots are
|
||||
// converted to `_dot_` and pre-existing underscores are converted to `__`.
|
||||
DotsEscaping
|
||||
|
||||
// ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
|
||||
// characters with the unicode value, surrounded by underscores. Single
|
||||
// underscores are replaced with double underscores.
|
||||
ValueEncodingEscaping
|
||||
)
|
||||
|
||||
const (
|
||||
// EscapingKey is the key in an Accept or Content-Type header that defines how
|
||||
// metric and label names that do not conform to the legacy character
|
||||
// requirements should be escaped when being scraped by a legacy prometheus
|
||||
// system. If a system does not explicitly pass an escaping parameter in the
|
||||
// Accept header, the default NameEscapingScheme will be used.
|
||||
EscapingKey = "escaping"
|
||||
|
||||
// Possible values for Escaping Key:
|
||||
AllowUTF8 = "allow-utf-8" // No escaping required.
|
||||
EscapeUnderscores = "underscores"
|
||||
EscapeDots = "dots"
|
||||
EscapeValues = "values"
|
||||
)
|
||||
|
||||
// MetricNameRE is a regular expression matching valid metric
|
||||
// names. Note that the IsValidMetricName function performs the same
|
||||
// check but faster than a match with this regular expression.
|
||||
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
|
||||
)
|
||||
var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
|
||||
|
||||
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
|
||||
// a singleton and refers to one and only one stream of samples.
|
||||
|
@ -86,17 +155,303 @@ func (m Metric) FastFingerprint() Fingerprint {
|
|||
return LabelSet(m).FastFingerprint()
|
||||
}
|
||||
|
||||
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
|
||||
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
|
||||
// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
|
||||
// selected.
|
||||
func IsValidMetricName(n LabelValue) bool {
|
||||
switch NameValidationScheme {
|
||||
case LegacyValidation:
|
||||
return IsValidLegacyMetricName(n)
|
||||
case UTF8Validation:
|
||||
if len(n) == 0 {
|
||||
return false
|
||||
}
|
||||
return utf8.ValidString(string(n))
|
||||
default:
|
||||
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
|
||||
}
|
||||
}
|
||||
|
||||
// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
|
||||
// legacy validation scheme regardless of the value of NameValidationScheme.
|
||||
// This function, however, does not use MetricNameRE for the check but a much
|
||||
// faster hardcoded implementation.
|
||||
func IsValidMetricName(n LabelValue) bool {
|
||||
func IsValidLegacyMetricName(n LabelValue) bool {
|
||||
if len(n) == 0 {
|
||||
return false
|
||||
}
|
||||
for i, b := range n {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
if !isValidLegacyRune(b, i) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EscapeMetricFamily escapes the given metric names and labels with the given
|
||||
// escaping scheme. Returns a new object that uses the same pointers to fields
|
||||
// when possible and creates new escaped versions so as not to mutate the
|
||||
// input.
|
||||
func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if scheme == NoEscaping {
|
||||
return v
|
||||
}
|
||||
|
||||
out := &dto.MetricFamily{
|
||||
Help: v.Help,
|
||||
Type: v.Type,
|
||||
Unit: v.Unit,
|
||||
}
|
||||
|
||||
// If the name is nil, copy as-is, don't try to escape.
|
||||
if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
|
||||
out.Name = v.Name
|
||||
} else {
|
||||
out.Name = proto.String(EscapeName(v.GetName(), scheme))
|
||||
}
|
||||
for _, m := range v.Metric {
|
||||
if !metricNeedsEscaping(m) {
|
||||
out.Metric = append(out.Metric, m)
|
||||
continue
|
||||
}
|
||||
|
||||
escaped := &dto.Metric{
|
||||
Gauge: m.Gauge,
|
||||
Counter: m.Counter,
|
||||
Summary: m.Summary,
|
||||
Untyped: m.Untyped,
|
||||
Histogram: m.Histogram,
|
||||
TimestampMs: m.TimestampMs,
|
||||
}
|
||||
|
||||
for _, l := range m.Label {
|
||||
if l.GetName() == MetricNameLabel {
|
||||
if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
|
||||
escaped.Label = append(escaped.Label, l)
|
||||
continue
|
||||
}
|
||||
escaped.Label = append(escaped.Label, &dto.LabelPair{
|
||||
Name: proto.String(MetricNameLabel),
|
||||
Value: proto.String(EscapeName(l.GetValue(), scheme)),
|
||||
})
|
||||
continue
|
||||
}
|
||||
if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
|
||||
escaped.Label = append(escaped.Label, l)
|
||||
continue
|
||||
}
|
||||
escaped.Label = append(escaped.Label, &dto.LabelPair{
|
||||
Name: proto.String(EscapeName(l.GetName(), scheme)),
|
||||
Value: l.Value,
|
||||
})
|
||||
}
|
||||
out.Metric = append(out.Metric, escaped)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func metricNeedsEscaping(m *dto.Metric) bool {
|
||||
for _, l := range m.Label {
|
||||
if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
|
||||
return true
|
||||
}
|
||||
if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
lowerhex = "0123456789abcdef"
|
||||
)
|
||||
|
||||
// EscapeName escapes the incoming name according to the provided escaping
|
||||
// scheme. Depending on the rules of escaping, this may cause no change in the
|
||||
// string that is returned. (Especially NoEscaping, which by definition is a
|
||||
// noop). This function does not do any validation of the name.
|
||||
func EscapeName(name string, scheme EscapingScheme) string {
|
||||
if len(name) == 0 {
|
||||
return name
|
||||
}
|
||||
var escaped strings.Builder
|
||||
switch scheme {
|
||||
case NoEscaping:
|
||||
return name
|
||||
case UnderscoreEscaping:
|
||||
if IsValidLegacyMetricName(LabelValue(name)) {
|
||||
return name
|
||||
}
|
||||
for i, b := range name {
|
||||
if isValidLegacyRune(b, i) {
|
||||
escaped.WriteRune(b)
|
||||
} else {
|
||||
escaped.WriteRune('_')
|
||||
}
|
||||
}
|
||||
return escaped.String()
|
||||
case DotsEscaping:
|
||||
// Do not early return for legacy valid names, we still escape underscores.
|
||||
for i, b := range name {
|
||||
if b == '_' {
|
||||
escaped.WriteString("__")
|
||||
} else if b == '.' {
|
||||
escaped.WriteString("_dot_")
|
||||
} else if isValidLegacyRune(b, i) {
|
||||
escaped.WriteRune(b)
|
||||
} else {
|
||||
escaped.WriteRune('_')
|
||||
}
|
||||
}
|
||||
return escaped.String()
|
||||
case ValueEncodingEscaping:
|
||||
if IsValidLegacyMetricName(LabelValue(name)) {
|
||||
return name
|
||||
}
|
||||
escaped.WriteString("U__")
|
||||
for i, b := range name {
|
||||
if isValidLegacyRune(b, i) {
|
||||
escaped.WriteRune(b)
|
||||
} else if !utf8.ValidRune(b) {
|
||||
escaped.WriteString("_FFFD_")
|
||||
} else if b < 0x100 {
|
||||
escaped.WriteRune('_')
|
||||
for s := 4; s >= 0; s -= 4 {
|
||||
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
|
||||
}
|
||||
escaped.WriteRune('_')
|
||||
} else if b < 0x10000 {
|
||||
escaped.WriteRune('_')
|
||||
for s := 12; s >= 0; s -= 4 {
|
||||
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
|
||||
}
|
||||
escaped.WriteRune('_')
|
||||
}
|
||||
}
|
||||
return escaped.String()
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
|
||||
}
|
||||
}
|
||||
|
||||
// lower function taken from strconv.atoi
|
||||
func lower(c byte) byte {
|
||||
return c | ('x' - 'X')
|
||||
}
|
||||
|
||||
// UnescapeName unescapes the incoming name according to the provided escaping
|
||||
// scheme if possible. Some schemes are partially or totally non-round-trippable.
|
||||
// If any error is encountered, returns the original input.
|
func UnescapeName(name string, scheme EscapingScheme) string {
    if len(name) == 0 {
        return name
    }
    switch scheme {
    case NoEscaping:
        return name
    case UnderscoreEscaping:
        // It is not possible to unescape from underscore replacement.
        return name
    case DotsEscaping:
        name = strings.ReplaceAll(name, "_dot_", ".")
        name = strings.ReplaceAll(name, "__", "_")
        return name
    case ValueEncodingEscaping:
        escapedName, found := strings.CutPrefix(name, "U__")
        if !found {
            return name
        }

        var unescaped strings.Builder
    TOP:
        for i := 0; i < len(escapedName); i++ {
            // All non-underscores are treated normally.
            if escapedName[i] != '_' {
                unescaped.WriteByte(escapedName[i])
                continue
            }
            i++
            if i >= len(escapedName) {
                return name
            }
            // A double underscore is a single underscore.
            if escapedName[i] == '_' {
                unescaped.WriteByte('_')
                continue
            }
            // We think we are in a UTF-8 code, process it.
            var utf8Val uint
            for j := 0; i < len(escapedName); j++ {
                // This is too many characters for a utf8 value.
                if j > 4 {
                    return name
                }
                // Found a closing underscore, convert to a rune, check validity, and append.
                if escapedName[i] == '_' {
                    utf8Rune := rune(utf8Val)
                    if !utf8.ValidRune(utf8Rune) {
                        return name
                    }
                    unescaped.WriteRune(utf8Rune)
                    continue TOP
                }
                r := lower(escapedName[i])
                utf8Val *= 16
                if r >= '0' && r <= '9' {
                    utf8Val += uint(r) - '0'
                } else if r >= 'a' && r <= 'f' {
                    utf8Val += uint(r) - 'a' + 10
                } else {
                    return name
                }
                i++
            }
            // Didn't find closing underscore, invalid.
            return name
        }
        return unescaped.String()
    default:
        panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
    }
}

func isValidLegacyRune(b rune, i int) bool {
    return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
}

func (e EscapingScheme) String() string {
    switch e {
    case NoEscaping:
        return AllowUTF8
    case UnderscoreEscaping:
        return EscapeUnderscores
    case DotsEscaping:
        return EscapeDots
    case ValueEncodingEscaping:
        return EscapeValues
    default:
        panic(fmt.Sprintf("unknown format scheme %d", e))
    }
}

func ToEscapingScheme(s string) (EscapingScheme, error) {
    if s == "" {
        return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
    }
    switch s {
    case AllowUTF8:
        return NoEscaping, nil
    case EscapeUnderscores:
        return UnderscoreEscaping, nil
    case EscapeDots:
        return DotsEscaping, nil
    case EscapeValues:
        return ValueEncodingEscaping, nil
    default:
        return NoEscaping, fmt.Errorf("unknown format scheme " + s)
    }
}
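For orientation, not part of the vendored diff: a minimal sketch of how these escaping helpers behave, assuming the file above is the vendored github.com/prometheus/common/model package at v0.55.0 and that the partially shown escape function is its exported EscapeName. The metric name "my.metric" is a hypothetical example.

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    const name = "my.metric" // hypothetical UTF-8 metric name

    // Dots escaping is round-trippable: '.' becomes "_dot_" and back.
    dots := model.EscapeName(name, model.DotsEscaping)
    fmt.Println(dots, model.UnescapeName(dots, model.DotsEscaping)) // my_dot_metric my.metric

    // Value encoding hex-encodes invalid runes between underscores ('.' is 0x2e).
    vals := model.EscapeName(name, model.ValueEncodingEscaping)
    fmt.Println(vals, model.UnescapeName(vals, model.ValueEncodingEscaping)) // U__my_2e_metric my.metric

    // Underscore escaping is lossy; UnescapeName returns its input unchanged.
    under := model.EscapeName(name, model.UnderscoreEscaping)
    fmt.Println(under, model.UnescapeName(under, model.UnderscoreEscaping)) // my_metric my_metric

    // Scheme names map back through ToEscapingScheme / String.
    scheme, err := model.ToEscapingScheme(model.EscapeDots)
    fmt.Println(scheme, err) // dots <nil>
}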
@@ -22,10 +22,8 @@ import (
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255

var (
    // cache the signature of an empty label set.
    emptyLabelSignature = hashNew()
)
var emptyLabelSignature = hashNew()

// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
@@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
    }
    for _, m := range s.Matchers {
        if err := m.Validate(); err != nil {
            return fmt.Errorf("invalid matcher: %s", err)
            return fmt.Errorf("invalid matcher: %w", err)
        }
    }
    if s.StartsAt.IsZero() {
@@ -21,14 +21,12 @@ import (
    "strings"
)

var (
    // ZeroSample is the pseudo zero-value of Sample used to signal a
    // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
    // and metric nil. Note that the natural zero value of Sample has a timestamp
    // of 0, which is possible to appear in a real Sample and thus not suitable
    // to signal a non-existing Sample.
    ZeroSample = Sample{Timestamp: Earliest}
)
var ZeroSample = Sample{Timestamp: Earliest}

// Sample is a sample pair associated with a metric. A single sample must either
// define Value or Histogram but not both. Histogram == nil implies the Value
@@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {

    value, err := strconv.ParseFloat(f, 64)
    if err != nil {
        return fmt.Errorf("error parsing sample value: %s", err)
        return fmt.Errorf("error parsing sample value: %w", err)
    }
    s.Value = SampleValue(value)
    return nil
@@ -20,14 +20,12 @@ import (
    "strconv"
)

var (
    // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
    // non-existing sample pair. It is a SamplePair with timestamp Earliest and
    // value 0.0. Note that the natural zero value of SamplePair has a timestamp
    // of 0, which is possible to appear in a real SamplePair and thus not
    // suitable to signal a non-existing SamplePair.
    ZeroSamplePair = SamplePair{Timestamp: Earliest}
)
var ZeroSamplePair = SamplePair{Timestamp: Earliest}

// A SampleValue is a representation of a value for a given sample at a given
// time.
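The ZeroSample and ZeroSamplePair comments above describe a sentinel pattern: the natural zero value could be a real sample, so a distinct value with timestamp Earliest marks "not found". A minimal sketch of how caller code might rely on it, assuming the vendored model package; the lookup helper here is hypothetical.

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

// lookup is a hypothetical stand-in for code that may or may not find a sample pair.
func lookup(found bool) model.SamplePair {
    if !found {
        // Return the sentinel rather than the natural zero value, whose
        // timestamp 0 could belong to a real sample.
        return model.ZeroSamplePair
    }
    return model.SamplePair{Timestamp: model.Now(), Value: 42}
}

func main() {
    if p := lookup(false); p == model.ZeroSamplePair {
        fmt.Println("no sample pair found")
    }
}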
160 vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go generated vendored Normal file
@@ -0,0 +1,160 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package protodelim marshals and unmarshals varint size-delimited messages.
package protodelim

import (
    "bufio"
    "encoding/binary"
    "fmt"
    "io"

    "google.golang.org/protobuf/encoding/protowire"
    "google.golang.org/protobuf/internal/errors"
    "google.golang.org/protobuf/proto"
)

// MarshalOptions is a configurable varint size-delimited marshaler.
type MarshalOptions struct{ proto.MarshalOptions }

// MarshalTo writes a varint size-delimited wire-format message to w.
// If w returns an error, MarshalTo returns it unchanged.
func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
    msgBytes, err := o.MarshalOptions.Marshal(m)
    if err != nil {
        return 0, err
    }

    sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
    sizeWritten, err := w.Write(sizeBytes)
    if err != nil {
        return sizeWritten, err
    }
    msgWritten, err := w.Write(msgBytes)
    if err != nil {
        return sizeWritten + msgWritten, err
    }
    return sizeWritten + msgWritten, nil
}

// MarshalTo writes a varint size-delimited wire-format message to w
// with the default options.
//
// See the documentation for [MarshalOptions.MarshalTo].
func MarshalTo(w io.Writer, m proto.Message) (int, error) {
    return MarshalOptions{}.MarshalTo(w, m)
}

// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
type UnmarshalOptions struct {
    proto.UnmarshalOptions

    // MaxSize is the maximum size in wire-format bytes of a single message.
    // Unmarshaling a message larger than MaxSize will return an error.
    // A zero MaxSize will default to 4 MiB.
    // Setting MaxSize to -1 disables the limit.
    MaxSize int64
}

const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size

// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
// that is larger than its configured [UnmarshalOptions.MaxSize].
type SizeTooLargeError struct {
    // Size is the varint size of the message encountered
    // that was larger than the provided MaxSize.
    Size uint64

    // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
    MaxSize uint64
}

func (e *SizeTooLargeError) Error() string {
    return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
}

// Reader is the interface expected by [UnmarshalFrom].
// It is implemented by *[bufio.Reader].
type Reader interface {
    io.Reader
    io.ByteReader
}

// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
// from r.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
//
// The error is [io.EOF] error only if no bytes are read.
// If an EOF happens after reading some but not all the bytes,
// UnmarshalFrom returns a non-io.EOF error.
// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged,
// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
    var sizeArr [binary.MaxVarintLen64]byte
    sizeBuf := sizeArr[:0]
    for i := range sizeArr {
        b, err := r.ReadByte()
        if err != nil {
            // Immediate EOF is unexpected.
            if err == io.EOF && i != 0 {
                break
            }
            return err
        }
        sizeBuf = append(sizeBuf, b)
        if b < 0x80 {
            break
        }
    }
    size, n := protowire.ConsumeVarint(sizeBuf)
    if n < 0 {
        return protowire.ParseError(n)
    }

    maxSize := o.MaxSize
    if maxSize == 0 {
        maxSize = defaultMaxSize
    }
    if maxSize != -1 && size > uint64(maxSize) {
        return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
    }

    var b []byte
    var err error
    if br, ok := r.(*bufio.Reader); ok {
        // Use the []byte from the bufio.Reader instead of having to allocate one.
        // This reduces CPU usage and allocated bytes.
        b, err = br.Peek(int(size))
        if err == nil {
            defer br.Discard(int(size))
        } else {
            b = nil
        }
    }
    if b == nil {
        b = make([]byte, size)
        _, err = io.ReadFull(r, b)
    }

    if err == io.EOF {
        return io.ErrUnexpectedEOF
    }
    if err != nil {
        return err
    }
    if err := o.Unmarshal(b, m); err != nil {
        return err
    }
    return nil
}

// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
// from r with the default options.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
//
// See the documentation for [UnmarshalOptions.UnmarshalFrom].
func UnmarshalFrom(r Reader, m proto.Message) error {
    return UnmarshalOptions{}.UnmarshalFrom(r, m)
}
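To show how the newly vendored protodelim package is meant to be used, a minimal sketch, not taken from this repository: MarshalTo prefixes each message with its varint length, and UnmarshalFrom reads messages back while enforcing MaxSize. The wrapperspb message type and the 1 KiB limit are illustrative choices.

package main

import (
    "bufio"
    "bytes"
    "fmt"
    "log"

    "google.golang.org/protobuf/encoding/protodelim"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    var buf bytes.Buffer

    // Write two length-prefixed messages back to back.
    for _, s := range []string{"hello", "world"} {
        if _, err := protodelim.MarshalTo(&buf, wrapperspb.String(s)); err != nil {
            log.Fatal(err)
        }
    }

    // A *bufio.Reader satisfies the Reader interface and lets UnmarshalFrom
    // borrow its internal buffer instead of allocating a new one.
    r := bufio.NewReader(&buf)
    opts := protodelim.UnmarshalOptions{MaxSize: 1 << 10} // refuse messages over 1 KiB
    for {
        msg := &wrapperspb.StringValue{}
        if err := opts.UnmarshalFrom(r, msg); err != nil {
            break // io.EOF once the stream is exhausted
        }
        fmt.Println(msg.GetValue())
    }
}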
@@ -184,9 +184,6 @@ github.com/klauspost/compress/zstd/internal/xxhash
# github.com/mattn/go-runewidth v0.0.15
## explicit; go 1.9
github.com/mattn/go-runewidth
# github.com/matttproud/golang_protobuf_extensions v1.0.4
## explicit; go 1.9
github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/miekg/pkcs11 v1.1.1
## explicit; go 1.12
github.com/miekg/pkcs11
@@ -230,6 +227,9 @@ github.com/moby/term/windows
# github.com/morikuni/aec v1.0.0
## explicit
github.com/morikuni/aec
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
github.com/opencontainers/go-digest
@@ -243,18 +243,19 @@ github.com/pkg/browser
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
# github.com/prometheus/client_golang v1.17.0
## explicit; go 1.19
# github.com/prometheus/client_golang v1.20.5
## explicit; go 1.20
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.5.0
# github.com/prometheus/client_model v0.6.1
## explicit; go 1.19
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.44.0
## explicit; go 1.18
# github.com/prometheus/common v0.55.0
## explicit; go 1.20
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.15.1
## explicit; go 1.20
@@ -273,8 +274,6 @@ github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
github.com/spf13/pflag
# github.com/stretchr/testify v1.9.0
## explicit; go 1.17
# github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a
## explicit; go 1.12
github.com/theupdateframework/notary
@@ -478,6 +477,7 @@ google.golang.org/grpc/status
google.golang.org/grpc/tap
# google.golang.org/protobuf v1.35.1
## explicit; go 1.21
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire