Mirror of https://github.com/docker/cli.git

Merge pull request #2711 from thaJeztah/bump_engine

vendor: bump docker/docker, docker/swarmkit, and dependencies

Commit 60abe967b5

vendor.conf
@ -2,36 +2,37 @@ cloud.google.com/go 0ebda48a7f143b1cce9eb37a8c11
github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
github.com/containerd/containerd c80284d4b5291a351bb471bcdabb5c1d95e7a583 # master / v1.4.0-dev
github.com/containerd/containerd e9f94064b9616ab36a8a51d632a63f97f7783c3d # v1.4.0-rc.1
github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
github.com/containerd/cgroups 44306b6a1d46985d916b48b4199f93a378af314f
github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff
github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12
github.com/cpuguy83/go-md2man/v2 f79a8a8ca69da163eee19ab442bedad7a35bba5a # v2.0.0
github.com/creack/pty 3a6a957789163cacdfe0e291617a1c8e80612c11 # v1.1.9
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
github.com/docker/compose-on-kubernetes 78e6a00beda64ac8ccb9fec787e601fe2ce0d5bb # v0.5.0-alpha1
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
github.com/docker/docker 0f41a77c6993ade605a639fb25994cfe5e1b3fe8
github.com/docker/docker f50a40e889fdaeebf14fce1d494f95e60092d21d
github.com/docker/docker-credential-helpers 54f0238b6bf101fc3ad3b34114cb5520beb562f5 # v0.6.3
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 # Contains a customized version of canonical/json and is used by Notary. The package is periodically rebased on current Go versions.
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
github.com/docker/swarmkit 035d564a3686f5e348d861ec0c074ff26854c498
github.com/docker/swarmkit d6592ddefd8a5319aadff74c558b816b1a0b2590
github.com/evanphx/json-patch 72bf35d0ff611848c1dc9df0f976c81192392fa5 # v4.1.0
github.com/fvbommel/sortorder a1ddee917217bbd0691affdca9c88d24d3f5c27d # v1.0.1
github.com/gofrs/flock 392e7fae8f1b0bdbd67dad7237d23f618feb6dbb # v0.7.1
github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
github.com/golang/protobuf 84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
github.com/google/gofuzz 24818f796faf91cd76ec7bddd72458fbced7a6c1
github.com/google/shlex e7afc7fbc51079733e9468cdfd1efcd7d196cd1d
github.com/googleapis/gnostic 7c663266750e7d82587642f65e60bc4083f1f84e # v0.2.0
github.com/gorilla/mux 75dcda0896e109a2a22c9315bca3bb21b87b2ba5 # v1.7.4
github.com/gorilla/mux 98cb6bf42e086f6af920b965c38cacc07402d51b # v1.8.0
github.com/grpc-ecosystem/go-grpc-middleware 3c51f7f332123e8be5a157c0802a228ac85bf9db # v1.2.0
github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746

@ -54,13 +55,13 @@ github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b # v1.0.0
github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf # v1.0.0-rc1
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
github.com/opencontainers/runc ff819c7e9184c13b7c2607fe6c30ae19403a7aff # v1.0.0-rc92
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465 # v0.9.1
github.com/prometheus/client_golang c5b7fccd204277076155f10851dad72b76a49317 # v0.8.0
github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2
github.com/prometheus/common 7600349dcfe1abd18d72d3a1770870d9800a7801
github.com/prometheus/procfs 7d6f385de8bea29190f15ba9931442a0eaef9af7
github.com/prometheus/client_golang 6edbbd9e560190e318cdc5b4d3e630b442858380 # v1.6.0
github.com/prometheus/client_model 7bc5445566f0fe75b15de23e6b93886e982d7bf9 # v0.2.0
github.com/prometheus/common d978bcb1309602d68bb4ba69cf3f8ed900e07308 # v0.9.1
github.com/prometheus/procfs 46159f73e74d1cb8dc223deef9b2d049286f46b1 # v0.0.11
github.com/russross/blackfriday/v2 d3b5b032dc8e8927d31a5071b56e14c89f045135 # v2.0.1
github.com/shurcooL/sanitized_anchor_name 7bfe4c7ecddb3666a94b053b422cdd8f5aaa3615 # v1.0.0
github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0

@ -73,11 +74,11 @@ github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e
github.com/xeipuuv/gojsonpointer 02993c407bfbf5f6dae44c4f4b1cf6a39b5fc5bb
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema 82fcdeb203eb6ab2a67d0a623d9c19e5e5a64927 # v1.2.0
golang.org/x/crypto 2aa609cf4a9d7d1126360de73b55b6002f9e052a
golang.org/x/net 0de0cce0169b09b364e001f108dc0399ea8630b3
golang.org/x/crypto 75b288015ac94e66e3d6715fb68a9b41bf046ec2
golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7
golang.org/x/oauth2 bf48bf16ab8d622ce64ec6ce98d2c98f916b6303
golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb
golang.org/x/sys 85ca7c5b95cdf1e557abb38a283d1e61a5959c31
golang.org/x/sys ed371f2e16b4b305ee99df548828de367527b76b
golang.org/x/text 23ae387dee1f90d29a23c0e87ee0b46038fbed0e # v0.3.3
golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8
@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -0,0 +1,67 @@
# xxhash

[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)
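Not part of the upstream README: a minimal usage sketch of the API listed above (Sum64, Sum64String, and the streaming Digest), assuming the module import path github.com/cespare/xxhash/v2 shown in the go.mod below.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello, world"))

	// Streaming: Digest implements hash.Hash64, so data can be fed
	// incrementally and the state reused via Reset.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64())
}
```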
@ -0,0 +1,3 @@
module github.com/cespare/xxhash/v2

go 1.11
@ -0,0 +1,236 @@
|
|||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
prime1 uint64 = 11400714785074694791
|
||||
prime2 uint64 = 14029467366897019727
|
||||
prime3 uint64 = 1609587929392839161
|
||||
prime4 uint64 = 9650029242287828579
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
v3 uint64
|
||||
v4 uint64
|
||||
total uint64
|
||||
mem [32]byte
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||
func New() *Digest {
|
||||
var d Digest
|
||||
d.Reset()
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
// Size always returns 8 bytes.
|
||||
func (d *Digest) Size() int { return 8 }
|
||||
|
||||
// BlockSize always returns 32 bytes.
|
||||
func (d *Digest) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to d. It always returns len(b), nil.
|
||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
if len(b) >= 32 {
|
||||
// One or more full blocks left.
|
||||
nw := writeBlocks(d, b)
|
||||
b = b[nw:]
|
||||
}
|
||||
|
||||
// Store any remaining partial block.
|
||||
copy(d.mem[:], b)
|
||||
d.n = len(b)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
func (d *Digest) Sum(b []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
// Sum64 returns the current hash.
|
||||
func (d *Digest) Sum64() uint64 {
|
||||
var h uint64
|
||||
|
||||
if d.total >= 32 {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = d.v3 + prime5
|
||||
}
|
||||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
const (
|
||||
magic = "xxh\x06"
|
||||
marshaledSize = len(magic) + 8*5 + 32
|
||||
)
|
||||
|
||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, 0, marshaledSize)
|
||||
b = append(b, magic...)
|
||||
b = appendUint64(b, d.v1)
|
||||
b = appendUint64(b, d.v2)
|
||||
b = appendUint64(b, d.v3)
|
||||
b = appendUint64(b, d.v4)
|
||||
b = appendUint64(b, d.total)
|
||||
b = append(b, d.mem[:d.n]...)
|
||||
b = b[:len(b)+len(d.mem)-d.n]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||
return errors.New("xxhash: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("xxhash: invalid hash state size")
|
||||
}
|
||||
b = b[len(magic):]
|
||||
b, d.v1 = consumeUint64(b)
|
||||
b, d.v2 = consumeUint64(b)
|
||||
b, d.v3 = consumeUint64(b)
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
b = b[len(d.mem):]
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func appendUint64(b []byte, x uint64) []byte {
|
||||
var a [8]byte
|
||||
binary.LittleEndian.PutUint64(a[:], x)
|
||||
return append(b, a[:]...)
|
||||
}
|
||||
|
||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
||||
x := u64(b)
|
||||
return b[8:], x
|
||||
}
|
||||
|
||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||
|
||||
func round(acc, input uint64) uint64 {
|
||||
acc += input * prime2
|
||||
acc = rol31(acc)
|
||||
acc *= prime1
|
||||
return acc
|
||||
}
|
||||
|
||||
func mergeRound(acc, val uint64) uint64 {
|
||||
val = round(0, val)
|
||||
acc ^= val
|
||||
acc = acc*prime1 + prime4
|
||||
return acc
|
||||
}
|
||||
|
||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
|
@ -0,0 +1,13 @@
|
|||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
//
|
||||
//go:noescape
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
//go:noescape
|
||||
func writeBlocks(d *Digest, b []byte) int
|
|
@ -0,0 +1,215 @@
|
|||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// CX pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// R15 prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ R15, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), R15
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until CX > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ CX, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ R15, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JG singles
|
||||
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), CX
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is CX minus the old base pointer.
|
||||
SUBQ b_base+8(FP), CX
|
||||
MOVQ CX, ret+32(FP)
|
||||
|
||||
RET
|
|
@ -0,0 +1,76 @@
|
|||
// +build !amd64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// d := New()
|
||||
// d.Write(b)
|
||||
// return d.Sum64()
|
||||
// but this is faster, particularly for small inputs.
|
||||
|
||||
n := len(b)
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = prime5
|
||||
}
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func writeBlocks(d *Digest, b []byte) int {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
n := len(b)
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
||||
return n - len(b)
|
||||
}
|
|
@ -0,0 +1,15 @@
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}
@ -0,0 +1,46 @@
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
	"reflect"
	"unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return d.Write(b)
}
@ -1,6 +1,6 @@
# cgroups

[![Build Status](https://travis-ci.org/containerd/cgroups.svg?branch=master)](https://travis-ci.org/containerd/cgroups)
[![Build Status](https://github.com/containerd/cgroups/workflows/CI/badge.svg)](https://github.com/containerd/cgroups/actions?query=workflow%3ACI)
[![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups)
[![GoDoc](https://godoc.org/github.com/containerd/cgroups?status.svg)](https://godoc.org/github.com/containerd/cgroups)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/cgroups)](https://goreportcard.com/report/github.com/containerd/cgroups)

@ -65,7 +65,7 @@ To update the resources applied in the cgroup

```go
shares = uint64(200)
if err := control.Update(&specs.LinuxResources{
	CPU: &specs.CPU{
	CPU: &specs.LinuxCPU{
		Shares: &shares,
	},
}); err != nil {

@ -112,6 +112,27 @@ err := control.MoveTo(destination)

subCgroup, err := control.New("child", resources)
```

### Registering for memory events

This allows you to get notified by an eventfd for v1 memory cgroups events.

```go
event := cgroups.MemoryThresholdEvent(50 * 1024 * 1024, false)
efd, err := control.RegisterMemoryEvent(event)
```

```go
event := cgroups.MemoryPressureEvent(cgroups.MediumPressure, cgroups.DefaultMode)
efd, err := control.RegisterMemoryEvent(event)
```

```go
efd, err := control.OOMEventFD()
// or by using RegisterMemoryEvent
event := cgroups.OOMEvent()
efd, err := control.RegisterMemoryEvent(event)
```
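Not part of the upstream README: a minimal sketch of draining one of the eventfds returned by the snippets above, assuming (as in containerd/cgroups for cgroup v1) that RegisterMemoryEvent and OOMEventFD hand back the eventfd as a uintptr.

```go
package cgroupsnotify

import (
	"encoding/binary"

	"golang.org/x/sys/unix"
)

// WaitForMemoryEvent blocks on an eventfd obtained from
// control.RegisterMemoryEvent or control.OOMEventFD and returns the
// event counter. eventfd reads are always 8 bytes, in host byte order
// (little-endian on the common platforms assumed here).
func WaitForMemoryEvent(efd uintptr) (uint64, error) {
	buf := make([]byte, 8)
	if _, err := unix.Read(int(efd), buf); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint64(buf), nil
}
```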
### Attention

Static paths should not include the `/sys/fs/cgroup/` prefix; they should start with your own cgroups name.
@ -3,17 +3,16 @@ module github.com/containerd/cgroups
go 1.13

require (
	github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3
	github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775
	github.com/coreos/go-systemd/v22 v22.0.0
	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
	github.com/docker/go-units v0.4.0
	github.com/godbus/dbus/v5 v5.0.3
	github.com/gogo/protobuf v1.3.1
	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
	github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
	github.com/opencontainers/runtime-spec v1.0.2
	github.com/pkg/errors v0.9.1
	github.com/sirupsen/logrus v1.4.2
	github.com/sirupsen/logrus v1.6.0
	github.com/stretchr/testify v1.2.2
	github.com/urfave/cli v1.22.2
	golang.org/x/sys v0.0.0-20200120151820-655fe14d7479
	golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
)
@ -154,7 +154,7 @@ Taking a container object and turning it into a runnable process on a system is

```go
// create a new task
task, err := redis.NewTask(context, cio.Stdio)
task, err := redis.NewTask(context, cio.NewCreator(cio.WithStdio))
defer task.Delete(context)

// the task is now running and has a pid that can be use to setup networking

@ -184,7 +184,7 @@ checkpoint, err := client.Pull(context, "myregistry/checkpoints/redis:master")
redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint))
defer container.Delete(context)

task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
task, err = redis.NewTask(context, cio.NewCreator(cio.WithStdio), containerd.WithTaskCheckpoint(checkpoint))
defer task.Delete(context)

err := task.Start(context)
@ -74,8 +74,8 @@ func getCPUInfo(pattern string) (info string, err error) {
}

func getCPUVariant() string {
	if runtime.GOOS == "windows" {
		// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
	if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
		// Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
		// runtime.GOARCH to determine the variants
		var variant string
		switch runtime.GOARCH {
@ -1,102 +1,101 @@
|
|||
github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
|
||||
github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
|
||||
github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
|
||||
github.com/beorn7/perks v1.0.1
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/cespare/xxhash/v2 v2.1.1
|
||||
github.com/containerd/btrfs 153935315f4ab9be5bf03650a1341454b05efa5d
|
||||
github.com/containerd/cgroups b4448137398923af7f4918b8b2ad8249172ca7a6
|
||||
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
|
||||
github.com/containerd/continuity 0ec596719c75bfd42908850990acea594b7593ac
|
||||
github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13
|
||||
github.com/containerd/go-runc a5c2862aed5e6358b305b0e16bfce58e0549b1cd
|
||||
github.com/containerd/ttrpc 72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1
|
||||
github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1
|
||||
github.com/coreos/go-systemd/v22 2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0
|
||||
github.com/cpuguy83/go-md2man 7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10
|
||||
github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff
|
||||
github.com/containerd/console v1.0.0
|
||||
github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
|
||||
github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf
|
||||
github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c
|
||||
github.com/containerd/ttrpc v1.0.1
|
||||
github.com/containerd/typeurl v1.0.1
|
||||
github.com/coreos/go-systemd/v22 v22.1.0
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0
|
||||
github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
|
||||
github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1
|
||||
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
|
||||
github.com/godbus/dbus/v5 37bf87eef99d69c4f1d3528bd66e3a87dc201472 # v5.0.3
|
||||
github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
|
||||
github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
|
||||
github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
|
||||
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
|
||||
github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
|
||||
github.com/hashicorp/errwrap 8a6fb523712970c966eefc6b39ed2c5e74880354 # v1.0.0
|
||||
github.com/hashicorp/go-multierror 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0
|
||||
github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3
|
||||
github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7
|
||||
github.com/konsorten/go-windows-terminal-sequences edb144dfd453055e1e49a3d8b410a660b5a87613 # v1.0.3
|
||||
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
|
||||
github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14
|
||||
github.com/Microsoft/hcsshim 5bc557dd210ff2caf615e6e22d398123de77fc11 # v0.8.9
|
||||
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
|
||||
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
|
||||
github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
|
||||
github.com/opencontainers/runtime-spec c4ee7d12c742ffe806cd9350b6af3b4b19faed6f # v1.0.2
|
||||
github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465 # v0.9.1
|
||||
github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
|
||||
github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
|
||||
github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
|
||||
github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
|
||||
github.com/russross/blackfriday 05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2
|
||||
github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0
|
||||
github.com/docker/go-metrics v0.0.1
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/godbus/dbus/v5 v5.0.3
|
||||
github.com/gogo/googleapis v1.3.2
|
||||
github.com/gogo/protobuf v1.3.1
|
||||
github.com/golang/protobuf v1.3.5
|
||||
github.com/google/go-cmp v0.2.0
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
github.com/hashicorp/errwrap v1.0.0
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/hashicorp/golang-lru v0.5.3
|
||||
github.com/imdario/mergo v0.3.7
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1
|
||||
github.com/Microsoft/go-winio v0.4.14
|
||||
github.com/Microsoft/hcsshim v0.8.9
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/opencontainers/runc v1.0.0-rc92
|
||||
github.com/opencontainers/runtime-spec 4d89ac9fbff6c455f46a5bb59c6b1bb7184a5e43 # v1.0.3-0.20200728170252-4d89ac9fbff6
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.6.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/prometheus/common v0.9.1
|
||||
github.com/prometheus/procfs v0.0.11
|
||||
github.com/russross/blackfriday/v2 v2.0.1
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
|
||||
github.com/urfave/cli bfe2e925cfb6d44b40ad3a779165ea7e8aff9212 # v1.22.0
|
||||
go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3
|
||||
go.opencensus.io 9c377598961b706d1542bd2d84d538b5094d596e # v0.22.0
|
||||
golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3
|
||||
github.com/urfave/cli v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
go.opencensus.io v0.22.0
|
||||
golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7
|
||||
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
|
||||
golang.org/x/sys 5c8b2ff67527cb88b770f693cebf3799036d8bc0
|
||||
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
||||
golang.org/x/sys ed371f2e16b4b305ee99df548828de367527b76b
|
||||
golang.org/x/text v0.3.3
|
||||
google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
|
||||
google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1
|
||||
gotest.tools/v3 bb0d8a963040ea5048dcef1a14d8f8b58a33d4b3 # v3.0.2
|
||||
google.golang.org/grpc v1.27.1
|
||||
gotest.tools/v3 v3.0.2
|
||||
|
||||
# cgroups dependencies
|
||||
github.com/cilium/ebpf 4032b1d8aae306b7bb94a2a11002932caf88c644
|
||||
github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28
|
||||
|
||||
# cri dependencies
|
||||
github.com/containerd/cri 65830369b6b2b4edc454bf5cebbd9b76c1c1ac66 # master
|
||||
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
|
||||
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
|
||||
github.com/containerd/cri 4e6644c8cf7fb825f62e0007421b7d83dfeab5a1 # master
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f
|
||||
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
|
||||
github.com/emicklei/go-restful b993709ae1a4f6dd19cfa475232614441b11c9d5 # v2.9.5
|
||||
github.com/google/gofuzz db92cf7ae75e4a7a28abc005addab2b394362888 # v1.1.0
|
||||
github.com/json-iterator/go 03217c3e97663914aec3faafde50d081f197a0a2 # v1.1.8
|
||||
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
|
||||
github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
|
||||
github.com/opencontainers/selinux 0d49ba2a6aae052c614dfe5de62a158711a6c461 # 1.5.1
|
||||
github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
|
||||
github.com/stretchr/testify 221dbe5ed46703ee255b1da0dec05086f5035f62 # v1.4.0
|
||||
github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6
|
||||
golang.org/x/crypto bac4c82f69751a6dd76e702d54b3ceb88adab236
|
||||
golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33
|
||||
golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef
|
||||
gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
|
||||
gopkg.in/yaml.v2 53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8
|
||||
k8s.io/api d2dce8e1788e4be2be3a62b6439b3eaa087df0df # v0.18.0
|
||||
k8s.io/apimachinery 105e0c6d63f10531ed07f3b5a2195771a0fa444b # v0.18.0
|
||||
k8s.io/apiserver 5c8e895629a454efd75a453d1dea5b8142db0013 # v0.18.0
|
||||
k8s.io/client-go 0b19784585bd0a0ee5509855829ead81feaa2bdc # v0.18.0
|
||||
k8s.io/cri-api 3d1680d8d202aa12c5dc5689170c3c03a488d35b # v0.18.0
|
||||
k8s.io/klog 2ca9ad30301bf30a8a6e0fa2110db6b8df699a91 # v1.0.0
|
||||
k8s.io/kubernetes 9e991415386e4cf155a24b1da15becaa390438d8 # v1.18.0
|
||||
k8s.io/utils a9aa75ae1b89e1b992c33383f48e942d97e52dae
|
||||
sigs.k8s.io/structured-merge-diff/v3 877aee05330847a873a1a8998b40e12a1e0fde25 # v3.0.0
|
||||
sigs.k8s.io/yaml 9fc95527decd95bb9d28cc2eab08179b2d0f6971 # v1.2.0
|
||||
github.com/emicklei/go-restful v2.9.5
|
||||
github.com/go-logr/logr v0.2.0
|
||||
github.com/google/gofuzz v1.1.0
|
||||
github.com/json-iterator/go v1.1.10
|
||||
github.com/modern-go/concurrent 1.0.3
|
||||
github.com/modern-go/reflect2 v1.0.1
|
||||
github.com/opencontainers/selinux v1.6.0
|
||||
github.com/tchap/go-patricia v2.2.6
|
||||
github.com/willf/bitset d5bec3311243426a3c6d1b7a795f24b17c686dbb # 1.1.10+ used by selinux pkg
|
||||
golang.org/x/crypto 75b288015ac94e66e3d6715fb68a9b41bf046ec2
|
||||
golang.org/x/oauth2 858c2ad4c8b6c5d10852cb89079f6ca1c7309787
|
||||
golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
|
||||
gopkg.in/inf.v0 v0.9.1
|
||||
gopkg.in/yaml.v2 v2.2.8
|
||||
k8s.io/api v0.19.0-rc.4
|
||||
k8s.io/apimachinery v0.19.0-rc.4
|
||||
k8s.io/apiserver v0.19.0-rc.4
|
||||
k8s.io/client-go v0.19.0-rc.4
|
||||
k8s.io/cri-api v0.19.0-rc.4
|
||||
k8s.io/klog/v2 v2.2.0
|
||||
k8s.io/utils 2df71ebbae66f39338aed4cd0bb82d2212ee33cc
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
|
||||
# cni dependencies
|
||||
github.com/containerd/go-cni 0d360c50b10b350b6bb23863fd4dfb1c232b01c9
|
||||
github.com/containernetworking/cni 4cfb7b568922a3c79a23e438dc52fe537fc9687e # v0.7.1
|
||||
github.com/containernetworking/plugins 9f96827c7cabb03f21d86326000c00f61e181f6a # v0.7.6
|
||||
github.com/fsnotify/fsnotify 4bf2d1fec78374803a39307bfb8d340688f4f28e # v1.4.8
|
||||
github.com/containerd/go-cni v1.0.0
|
||||
github.com/containernetworking/cni v0.7.1
|
||||
github.com/containernetworking/plugins v0.7.6
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
|
||||
# image decrypt depedencies
|
||||
github.com/containerd/imgcrypt 9e761ccd6069fb707ec9493435f31475b5524b38 # v1.0.1
|
||||
github.com/containers/ocicrypt 0343cc6053fd65069df55bce6838096e09b4033a # v1.0.1 from containerd/imgcrypt
|
||||
github.com/fullsailor/pkcs7 8306686428a5fe132eac8cb7c4848af725098bd4 # from containers/ocicrypt
|
||||
gopkg.in/square/go-jose.v2 730df5f748271903322feb182be83b43ebbbe27d # v2.3.1 from containers/ocicrypt
|
||||
github.com/containerd/imgcrypt v1.0.1
|
||||
github.com/containers/ocicrypt v1.0.1
|
||||
github.com/fullsailor/pkcs7 8306686428a5fe132eac8cb7c4848af725098bd4
|
||||
gopkg.in/square/go-jose.v2 v2.3.1
|
||||
|
||||
# zfs dependencies
|
||||
github.com/containerd/zfs 9abf673ca6ff9ab8d9bd776a4ceff8f6dc699c3d
|
||||
|
|
|
@ -1,6 +1,8 @@
package events // import "github.com/docker/docker/api/types/events"

const (
	// BuilderEventType is the event type that the builder generates
	BuilderEventType = "builder"
	// ContainerEventType is the event type that containers generate
	ContainerEventType = "container"
	// DaemonEventType is the event type that daemon generate
@ -5,6 +5,7 @@ import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/go-units"
)

// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)

@ -75,4 +76,5 @@ type ContainerSpec struct {
	Sysctls map[string]string `json:",omitempty"`
	CapabilityAdd []string `json:",omitempty"`
	CapabilityDrop []string `json:",omitempty"`
	Ulimits []*units.Ulimit `json:",omitempty"`
}
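The diff above only adds the Ulimits field; as a hedged illustration (the swarm package path and the image/limit values are assumptions, not part of this diff), a service's container spec could populate it like this:

```go
package main

import (
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/go-units"
)

// containerSpecWithUlimits shows how a service's container spec could carry
// the new per-container ulimits; the "nofile" values here are illustrative.
func containerSpecWithUlimits() swarm.ContainerSpec {
	return swarm.ContainerSpec{
		Image: "nginx:alpine",
		Ulimits: []*units.Ulimit{
			{Name: "nofile", Soft: 1024, Hard: 2048},
		},
	}
}

func main() { _ = containerSpecWithUlimits() }
```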
@ -134,8 +134,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp

	// Don't decorate context sentinel errors; users may be comparing to
	// them directly.
	switch err {
	case context.Canceled, context.DeadlineExceeded:
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return serverResp, err
	}
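The switch-to-errors.Is change above matters once a sentinel error arrives wrapped; a small self-contained illustration (not taken from the client code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// Wrapping is what the old `switch err` comparison could not see through.
	err := fmt.Errorf("request failed: %w", context.DeadlineExceeded)

	fmt.Println(err == context.DeadlineExceeded)          // false: direct comparison misses the wrapped sentinel
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: errors.Is unwraps the chain
}
```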
|
@ -9,7 +9,6 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
|
@ -107,14 +106,14 @@ func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
|
|||
|
||||
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
|
||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||
func LookupUser(username string) (user.User, error) {
|
||||
func LookupUser(name string) (user.User, error) {
|
||||
// first try a local system files lookup using existing capabilities
|
||||
usr, err := user.LookupUser(username)
|
||||
usr, err := user.LookupUser(name)
|
||||
if err == nil {
|
||||
return usr, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
||||
usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
|
||||
usr, err = getentUser(name)
|
||||
if err != nil {
|
||||
return user.User{}, err
|
||||
}
|
||||
|
@ -130,11 +129,11 @@ func LookupUID(uid int) (user.User, error) {
|
|||
return usr, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
||||
return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
|
||||
return getentUser(strconv.Itoa(uid))
|
||||
}
|
||||
|
||||
func getentUser(args string) (user.User, error) {
|
||||
reader, err := callGetent(args)
|
||||
func getentUser(name string) (user.User, error) {
|
||||
reader, err := callGetent("passwd", name)
|
||||
if err != nil {
|
||||
return user.User{}, err
|
||||
}
|
||||
|
@ -143,21 +142,21 @@ func getentUser(args string) (user.User, error) {
|
|||
return user.User{}, err
|
||||
}
|
||||
if len(users) == 0 {
|
||||
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
|
||||
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name)
|
||||
}
|
||||
return users[0], nil
|
||||
}
|
||||
|
||||
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
|
||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||
func LookupGroup(groupname string) (user.Group, error) {
|
||||
func LookupGroup(name string) (user.Group, error) {
|
||||
// first try a local system files lookup using existing capabilities
|
||||
group, err := user.LookupGroup(groupname)
|
||||
group, err := user.LookupGroup(name)
|
||||
if err == nil {
|
||||
return group, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
||||
return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
|
||||
return getentGroup(name)
|
||||
}
|
||||
|
||||
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
|
||||
|
@ -169,11 +168,11 @@ func LookupGID(gid int) (user.Group, error) {
|
|||
return group, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
||||
return getentGroup(fmt.Sprintf("%s %d", "group", gid))
|
||||
return getentGroup(strconv.Itoa(gid))
|
||||
}
|
||||
|
||||
func getentGroup(args string) (user.Group, error) {
|
||||
reader, err := callGetent(args)
|
||||
func getentGroup(name string) (user.Group, error) {
|
||||
reader, err := callGetent("group", name)
|
||||
if err != nil {
|
||||
return user.Group{}, err
|
||||
}
|
||||
|
@ -182,18 +181,18 @@ func getentGroup(args string) (user.Group, error) {
|
|||
return user.Group{}, err
|
||||
}
|
||||
if len(groups) == 0 {
|
||||
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
|
||||
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name)
|
||||
}
|
||||
return groups[0], nil
|
||||
}
|
||||
|
||||
func callGetent(args string) (io.Reader, error) {
|
||||
func callGetent(database, key string) (io.Reader, error) {
|
||||
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
|
||||
// if no `getent` command on host, can't do anything else
|
||||
if getentCmd == "" {
|
||||
return nil, fmt.Errorf("")
|
||||
return nil, fmt.Errorf("unable to find getent command")
|
||||
}
|
||||
out, err := execCmd(getentCmd, args)
|
||||
out, err := execCmd(getentCmd, database, key)
|
||||
if err != nil {
|
||||
exitCode, errC := system.GetExitCode(err)
|
||||
if errC != nil {
|
||||
|
@ -203,8 +202,7 @@ func callGetent(args string) (io.Reader, error) {
|
|||
case 1:
|
||||
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
|
||||
case 2:
|
||||
terms := strings.Split(args, " ")
|
||||
return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
|
||||
return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database)
|
||||
case 3:
|
||||
return nil, fmt.Errorf("getent database doesn't support enumeration")
|
||||
default:
|
||||
|
@ -235,19 +233,19 @@ func lazyChown(p string, uid, gid int, stat *system.StatT) error {
|
|||
// NewIdentityMapping takes a requested username and
|
||||
// using the data from /etc/sub{uid,gid} ranges, creates the
|
||||
// proper uid and gid remapping ranges for that user/group pair
|
||||
func NewIdentityMapping(username string) (*IdentityMapping, error) {
|
||||
usr, err := LookupUser(username)
|
||||
func NewIdentityMapping(name string) (*IdentityMapping, error) {
|
||||
usr, err := LookupUser(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not get user for username %s: %v", username, err)
|
||||
return nil, fmt.Errorf("Could not get user for username %s: %v", name, err)
|
||||
}
|
||||
|
||||
uid := strconv.Itoa(usr.Uid)
|
||||
|
||||
subuidRangesWithUserName, err := parseSubuid(username)
|
||||
subuidRangesWithUserName, err := parseSubuid(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
subgidRangesWithUserName, err := parseSubgid(username)
|
||||
subgidRangesWithUserName, err := parseSubgid(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -265,10 +263,10 @@ func NewIdentityMapping(username string) (*IdentityMapping, error) {
|
|||
subgidRanges := append(subgidRangesWithUserName, subgidRangesWithUID...)
|
||||
|
||||
if len(subuidRanges) == 0 {
|
||||
return nil, errors.Errorf("no subuid ranges found for user %q", username)
|
||||
return nil, errors.Errorf("no subuid ranges found for user %q", name)
|
||||
}
|
||||
if len(subgidRanges) == 0 {
|
||||
return nil, errors.Errorf("no subgid ranges found for user %q", username)
|
||||
return nil, errors.Errorf("no subgid ranges found for user %q", name)
|
||||
}
|
||||
|
||||
return &IdentityMapping{
|
||||
|
|
|
@ -17,18 +17,13 @@ import (
|
|||
var (
|
||||
once sync.Once
|
||||
userCommand string
|
||||
|
||||
cmdTemplates = map[string]string{
|
||||
"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
|
||||
"useradd": "-r -s /bin/false %s",
|
||||
"usermod": "-%s %d-%d %s",
|
||||
}
|
||||
|
||||
idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
|
||||
)
|
||||
|
||||
const (
|
||||
// default length for a UID/GID subordinate range
|
||||
defaultRangeLen = 65536
|
||||
defaultRangeStart = 100000
|
||||
userMod = "usermod"
|
||||
)
|
||||
|
||||
// AddNamespaceRangesUser takes a username and uses the standard system
|
||||
|
@ -67,7 +62,7 @@ func AddNamespaceRangesUser(name string) (int, int, error) {
|
|||
return uid, gid, nil
|
||||
}
|
||||
|
||||
func addUser(userName string) error {
|
||||
func addUser(name string) error {
|
||||
once.Do(func() {
|
||||
// set up which commands are used for adding users/groups dependent on distro
|
||||
if _, err := resolveBinary("adduser"); err == nil {
|
||||
|
@ -76,13 +71,18 @@ func addUser(userName string) error {
|
|||
userCommand = "useradd"
|
||||
}
|
||||
})
|
||||
if userCommand == "" {
|
||||
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
|
||||
var args []string
|
||||
switch userCommand {
|
||||
case "adduser":
|
||||
args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name}
|
||||
case "useradd":
|
||||
args = []string{"-r", "-s", "/bin/false", name}
|
||||
default:
|
||||
return fmt.Errorf("cannot add user; no useradd/adduser binary found")
|
||||
}
|
||||
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
|
||||
out, err := execCmd(userCommand, args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
|
||||
|
||||
if out, err := execCmd(userCommand, args...); err != nil {
|
||||
return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -101,7 +101,7 @@ func createSubordinateRanges(name string) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("Can't find available subuid range: %v", err)
|
||||
}
|
||||
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
|
||||
out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ func createSubordinateRanges(name string) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("Can't find available subgid range: %v", err)
|
||||
}
|
||||
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
|
||||
out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
|
||||
}
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
)

func resolveBinary(binname string) (string, error) {

@ -26,7 +25,7 @@ func resolveBinary(binname string) (string, error) {
	return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

func execCmd(cmd, args string) ([]byte, error) {
	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
func execCmd(cmd string, arg ...string) ([]byte, error) {
	execCmd := exec.Command(cmd, arg...)
	return execCmd.CombinedOutput()
}
@ -0,0 +1,13 @@
package system // import "github.com/docker/docker/pkg/system"

import "syscall"

// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
	return &StatT{size: s.Size,
		mode: s.Mode,
		uid: s.Uid,
		gid: s.Gid,
		rdev: s.Rdev,
		mtim: s.Mtim}, nil
}
@ -4,7 +4,7 @@ github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030
|
|||
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
||||
github.com/golang/gddo 72a348e765d293ed6d1ded7b699591f14d6cd921
|
||||
github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
|
||||
github.com/gorilla/mux 75dcda0896e109a2a22c9315bca3bb21b87b2ba5 # v1.7.4
|
||||
github.com/gorilla/mux 98cb6bf42e086f6af920b965c38cacc07402d51b # v1.8.0
|
||||
github.com/Microsoft/opengcs a10967154e143a36014584a6f664344e3bb0aa64
|
||||
github.com/moby/term 73f35e472e8f0a3f91347164138ce6bd73b756a9
|
||||
|
||||
|
@ -12,8 +12,8 @@ github.com/creack/pty 3a6a957789163cacdfe0e291617a
|
|||
github.com/konsorten/go-windows-terminal-sequences edb144dfd453055e1e49a3d8b410a660b5a87613 # v1.0.3
|
||||
github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0
|
||||
github.com/tchap/go-patricia a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0
|
||||
golang.org/x/net 0de0cce0169b09b364e001f108dc0399ea8630b3
|
||||
golang.org/x/sys 85ca7c5b95cdf1e557abb38a283d1e61a5959c31
|
||||
golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7
|
||||
golang.org/x/sys ed371f2e16b4b305ee99df548828de367527b76b
|
||||
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
|
||||
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
|
||||
github.com/moby/sys 6154f11e6840c0d6b0dbb23f4125a6134b3013c9 # mountinfo/v0.1.3
|
||||
|
@ -52,7 +52,7 @@ github.com/hashicorp/go-sockaddr c7188e74f6acae5a989bdc959aa7
|
|||
github.com/hashicorp/go-multierror 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0
|
||||
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
|
||||
github.com/docker/libkv 458977154600b9f23984d9f4b82e79570b5ae12b
|
||||
github.com/vishvananda/netns 0a2b9b5464df8343199164a0321edf3313202f7e
|
||||
github.com/vishvananda/netns db3c7e526aae966c4ccfa6c8189b693d6ac5d202
|
||||
github.com/vishvananda/netlink f049be6f391489d3f374498fe0c8df8449258372 # v1.1.0
|
||||
github.com/moby/ipvs 4566ccea0e08d68e9614c3e7a64a23b850c4bb35 # v1.0.1
|
||||
|
||||
|
@ -66,7 +66,7 @@ github.com/ugorji/go b4c50a2b199d93b13dc15e78929c
|
|||
github.com/hashicorp/consul 9a9cc9341bb487651a0399e3fc5e1e8a42e62dd9 # v0.5.2
|
||||
github.com/miekg/dns 6c0c4e6581f8e173cc562c8b3363ab984e4ae071 # v1.1.27
|
||||
github.com/ishidawataru/sctp 6e2cb1366111dcf547c13531e3a263a067715847
|
||||
go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3
|
||||
go.etcd.io/bbolt 232d8fc87f50244f9c808f4745759e08a304c029 # v1.3.5
|
||||
|
||||
# get graph and distribution packages
|
||||
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
|
||||
|
@ -83,10 +83,11 @@ google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfc
|
|||
# the containerd project first, and update both after that is merged.
|
||||
# This commit does not need to match RUNC_COMMIT as it is used for helper
|
||||
# packages but should be newer or equal.
|
||||
github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
|
||||
github.com/opencontainers/runtime-spec c4ee7d12c742ffe806cd9350b6af3b4b19faed6f # v1.0.2
|
||||
github.com/opencontainers/runc ff819c7e9184c13b7c2607fe6c30ae19403a7aff # v1.0.0-rc92
|
||||
github.com/opencontainers/runtime-spec 4d89ac9fbff6c455f46a5bb59c6b1bb7184a5e43 # v1.0.3-0.20200728170252-4d89ac9fbff6
|
||||
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
|
||||
github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
|
||||
github.com/cyphar/filepath-securejoin a261ee33d7a517f054effbf451841abaafe3e0fd # v0.2.2
|
||||
|
||||
# go-systemd v17 is required by github.com/coreos/pkg/capnslog/journald_formatter.go
|
||||
github.com/coreos/go-systemd 39ca1b05acc7ad1220e09f133283b8859a8b71ab # v17
|
||||
|
@ -122,25 +123,25 @@ github.com/googleapis/gax-go 317e0006254c44a0ac427cc52a0e
|
|||
google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8
|
||||
|
||||
# containerd
|
||||
github.com/containerd/containerd c80284d4b5291a351bb471bcdabb5c1d95e7a583 # master / v1.4.0-dev
|
||||
github.com/containerd/fifo ff969a566b00877c63489baf6e8c35d60af6142c
|
||||
github.com/containerd/continuity 26c1120b8d4107d2471b93ad78ef7ce1fc84c4c4
|
||||
github.com/containerd/cgroups 44306b6a1d46985d916b48b4199f93a378af314f
|
||||
github.com/containerd/containerd e9f94064b9616ab36a8a51d632a63f97f7783c3d # v1.4.0-rc.1
|
||||
github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf
|
||||
github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
|
||||
github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff
|
||||
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
|
||||
github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c
|
||||
github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1
|
||||
github.com/containerd/ttrpc 72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1
|
||||
github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
|
||||
github.com/cilium/ebpf 60c3aa43f488292fe2ee50fb8b833b383ca8ebbb
|
||||
github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28
|
||||
|
||||
# cluster
|
||||
github.com/docker/swarmkit 035d564a3686f5e348d861ec0c074ff26854c498
|
||||
github.com/docker/swarmkit d6592ddefd8a5319aadff74c558b816b1a0b2590
|
||||
github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
|
||||
github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
|
||||
github.com/golang/protobuf 84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5
|
||||
github.com/cloudflare/cfssl 5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
|
||||
github.com/fernet/fernet-go 9eac43b88a5efb8651d24de9b68e87567e029736
|
||||
github.com/google/certificate-transparency-go 37a384cd035e722ea46e55029093e26687138edf # v1.0.20
|
||||
golang.org/x/crypto 2aa609cf4a9d7d1126360de73b55b6002f9e052a
|
||||
golang.org/x/crypto 75b288015ac94e66e3d6715fb68a9b41bf046ec2
|
||||
golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
|
||||
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
|
||||
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
|
||||
|
@ -149,11 +150,11 @@ github.com/coreos/pkg 3ac0863d7acf3bc44daf49afef89
|
|||
code.cloudfoundry.org/clock 02e53af36e6c978af692887ed449b74026d76fec # v1.0.0
|
||||
|
||||
# prometheus
|
||||
github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
|
||||
github.com/prometheus/client_golang 6edbbd9e560190e318cdc5b4d3e630b442858380 # v1.6.0
|
||||
github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
|
||||
github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
|
||||
github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
|
||||
github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
|
||||
github.com/prometheus/client_model 7bc5445566f0fe75b15de23e6b93886e982d7bf9 # v0.2.0
|
||||
github.com/prometheus/common d978bcb1309602d68bb4ba69cf3f8ed900e07308 # v0.9.1
|
||||
github.com/prometheus/procfs 46159f73e74d1cb8dc223deef9b2d049286f46b1 # v0.0.11
|
||||
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
|
||||
github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465 # v0.9.1
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
@ -68,9 +68,21 @@ If you need to use a unit but it is not defined in the package please open a PR

Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).

## HTTP Metrics

To instrument an HTTP handler, you can wrap the code like this:

```go
namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
httpMetrics := namespace.NewDefaultHttpMetrics()
metrics.Register(namespace)
instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
```
Note: The `handler` label must be provided when a new namespace is created.

## Additional Metrics

Additional metrics are also defined here that are not avaliable in the prometheus client.
Additional metrics are also defined here that are not available in the prometheus client.
If you need a custom metric and it is generic enough to be used by multiple projects, define it here.

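Aside (editor's note, not part of the upstream commit): the README example above calls `NewDefaultHttpMetrics()` without an argument, while the function added to namespace.go later in this diff takes the handler name explicitly. A minimal end-to-end sketch using only the APIs introduced in this change might look like the following; the namespace name, handler name, and port are illustrative assumptions.

```go
package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	// Build a namespace and the default HTTP metrics for a named handler,
	// then register them with the global registry.
	ns := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{})
	httpMetrics := ns.NewDefaultHttpMetrics("my_handler")
	metrics.Register(ns)

	// Wrap the application handler with the promhttp-based instrumentation
	// added in http.go, and expose the scrape endpoint via metrics.Handler().
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.Handle("/", metrics.InstrumentHandler(httpMetrics, app))
	http.Handle("/metrics", metrics.Handler())
	http.ListenAndServe(":8080", nil)
}
```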
@ -0,0 +1,5 @@
module github.com/docker/go-metrics

go 1.11

require github.com/prometheus/client_golang v1.1.0

@ -4,10 +4,71 @@ import (
|
|||
"net/http"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
// HTTPHandlerOpts describes a set of configurable options of http metrics
|
||||
type HTTPHandlerOpts struct {
|
||||
DurationBuckets []float64
|
||||
RequestSizeBuckets []float64
|
||||
ResponseSizeBuckets []float64
|
||||
}
|
||||
|
||||
const (
|
||||
InstrumentHandlerResponseSize = iota
|
||||
InstrumentHandlerRequestSize
|
||||
InstrumentHandlerDuration
|
||||
InstrumentHandlerCounter
|
||||
InstrumentHandlerInFlight
|
||||
)
|
||||
|
||||
type HTTPMetric struct {
|
||||
prometheus.Collector
|
||||
handlerType int
|
||||
}
|
||||
|
||||
var (
|
||||
defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
|
||||
defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G
|
||||
defaultResponseSizeBuckets = defaultRequestSizeBuckets
|
||||
)
|
||||
|
||||
// Handler returns the global http.Handler that provides the prometheus
|
||||
// metrics format on GET requests
|
||||
// metrics format on GET requests. This handler is no longer instrumented.
|
||||
func Handler() http.Handler {
|
||||
return prometheus.Handler()
|
||||
return promhttp.Handler()
|
||||
}
|
||||
|
||||
func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
|
||||
var handler http.Handler
|
||||
handler = http.HandlerFunc(handlerFunc)
|
||||
for _, metric := range metrics {
|
||||
switch metric.handlerType {
|
||||
case InstrumentHandlerResponseSize:
|
||||
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
|
||||
handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
|
||||
}
|
||||
case InstrumentHandlerRequestSize:
|
||||
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
|
||||
handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
|
||||
}
|
||||
case InstrumentHandlerDuration:
|
||||
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
|
||||
handler = promhttp.InstrumentHandlerDuration(collector, handler)
|
||||
}
|
||||
case InstrumentHandlerCounter:
|
||||
if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
|
||||
handler = promhttp.InstrumentHandlerCounter(collector, handler)
|
||||
}
|
||||
case InstrumentHandlerInFlight:
|
||||
if collector, ok := metric.Collector.(prometheus.Gauge); ok {
|
||||
handler = promhttp.InstrumentHandlerInFlight(collector, handler)
|
||||
}
|
||||
}
|
||||
}
|
||||
return handler.ServeHTTP
|
||||
}
|
||||
|
|
|
@ -179,3 +179,137 @@ func makeName(name string, unit Unit) string {
|
|||
|
||||
return fmt.Sprintf("%s_%s", name, unit)
|
||||
}
|
||||
|
||||
func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
|
||||
return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
|
||||
DurationBuckets: defaultDurationBuckets,
|
||||
RequestSizeBuckets: defaultResponseSizeBuckets,
|
||||
ResponseSizeBuckets: defaultResponseSizeBuckets,
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
|
||||
return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
|
||||
DurationBuckets: durationBuckets,
|
||||
RequestSizeBuckets: requestSizeBuckets,
|
||||
ResponseSizeBuckets: responseSizeBuckets,
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
|
||||
var httpMetrics []*HTTPMetric
|
||||
inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
|
||||
requestTotalMetric := n.NewRequestTotalMetric(handlerName)
|
||||
requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
|
||||
requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
|
||||
responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
|
||||
httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
|
||||
return httpMetrics
|
||||
}
|
||||
|
||||
func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
|
||||
labels := prometheus.Labels(n.labels)
|
||||
labels["handler"] = handlerName
|
||||
metric := prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: n.name,
|
||||
Subsystem: n.subsystem,
|
||||
Name: "in_flight_requests",
|
||||
Help: "The in-flight HTTP requests",
|
||||
ConstLabels: prometheus.Labels(labels),
|
||||
})
|
||||
httpMetric := &HTTPMetric{
|
||||
Collector: metric,
|
||||
handlerType: InstrumentHandlerInFlight,
|
||||
}
|
||||
n.Add(httpMetric)
|
||||
return httpMetric
|
||||
}
|
||||
|
||||
func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
|
||||
labels := prometheus.Labels(n.labels)
|
||||
labels["handler"] = handlerName
|
||||
metric := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: n.name,
|
||||
Subsystem: n.subsystem,
|
||||
Name: "requests_total",
|
||||
Help: "Total number of HTTP requests made.",
|
||||
ConstLabels: prometheus.Labels(labels),
|
||||
},
|
||||
[]string{"code", "method"},
|
||||
)
|
||||
httpMetric := &HTTPMetric{
|
||||
Collector: metric,
|
||||
handlerType: InstrumentHandlerCounter,
|
||||
}
|
||||
n.Add(httpMetric)
|
||||
return httpMetric
|
||||
}
|
||||
func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
|
||||
if len(buckets) == 0 {
|
||||
panic("DurationBuckets must be provided")
|
||||
}
|
||||
labels := prometheus.Labels(n.labels)
|
||||
labels["handler"] = handlerName
|
||||
opts := prometheus.HistogramOpts{
|
||||
Namespace: n.name,
|
||||
Subsystem: n.subsystem,
|
||||
Name: "request_duration_seconds",
|
||||
Help: "The HTTP request latencies in seconds.",
|
||||
Buckets: buckets,
|
||||
ConstLabels: prometheus.Labels(labels),
|
||||
}
|
||||
metric := prometheus.NewHistogramVec(opts, []string{"method"})
|
||||
httpMetric := &HTTPMetric{
|
||||
Collector: metric,
|
||||
handlerType: InstrumentHandlerDuration,
|
||||
}
|
||||
n.Add(httpMetric)
|
||||
return httpMetric
|
||||
}
|
||||
|
||||
func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
|
||||
if len(buckets) == 0 {
|
||||
panic("RequestSizeBuckets must be provided")
|
||||
}
|
||||
labels := prometheus.Labels(n.labels)
|
||||
labels["handler"] = handlerName
|
||||
opts := prometheus.HistogramOpts{
|
||||
Namespace: n.name,
|
||||
Subsystem: n.subsystem,
|
||||
Name: "request_size_bytes",
|
||||
Help: "The HTTP request sizes in bytes.",
|
||||
Buckets: buckets,
|
||||
ConstLabels: prometheus.Labels(labels),
|
||||
}
|
||||
metric := prometheus.NewHistogramVec(opts, []string{})
|
||||
httpMetric := &HTTPMetric{
|
||||
Collector: metric,
|
||||
handlerType: InstrumentHandlerRequestSize,
|
||||
}
|
||||
n.Add(httpMetric)
|
||||
return httpMetric
|
||||
}
|
||||
|
||||
func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
|
||||
if len(buckets) == 0 {
|
||||
panic("ResponseSizeBuckets must be provided")
|
||||
}
|
||||
labels := prometheus.Labels(n.labels)
|
||||
labels["handler"] = handlerName
|
||||
opts := prometheus.HistogramOpts{
|
||||
Namespace: n.name,
|
||||
Subsystem: n.subsystem,
|
||||
Name: "response_size_bytes",
|
||||
Help: "The HTTP response sizes in bytes.",
|
||||
Buckets: buckets,
|
||||
ConstLabels: prometheus.Labels(labels),
|
||||
}
|
||||
metrics := prometheus.NewHistogramVec(opts, []string{})
|
||||
httpMetric := &HTTPMetric{
|
||||
Collector: metrics,
|
||||
handlerType: InstrumentHandlerResponseSize,
|
||||
}
|
||||
n.Add(httpMetric)
|
||||
return httpMetric
|
||||
}
|
||||
|
|
|
@ -28,15 +28,27 @@ type Timer interface {
|
|||
|
||||
// LabeledTimer is a timer that must have label values populated before use.
|
||||
type LabeledTimer interface {
|
||||
WithValues(labels ...string) Timer
|
||||
WithValues(labels ...string) *labeledTimerObserver
|
||||
}
|
||||
|
||||
type labeledTimer struct {
|
||||
m *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
func (lt *labeledTimer) WithValues(labels ...string) Timer {
|
||||
return &timer{m: lt.m.WithLabelValues(labels...)}
|
||||
type labeledTimerObserver struct {
|
||||
m prometheus.Observer
|
||||
}
|
||||
|
||||
func (lbo *labeledTimerObserver) Update(duration time.Duration) {
|
||||
lbo.m.Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
|
||||
lbo.m.Observe(time.Since(since).Seconds())
|
||||
}
|
||||
|
||||
func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
|
||||
return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
|
||||
}
|
||||
|
||||
func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
|
||||
|
@ -48,7 +60,7 @@ func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
|
|||
}
|
||||
|
||||
type timer struct {
|
||||
m prometheus.Histogram
|
||||
m prometheus.Observer
|
||||
}
|
||||
|
||||
func (t *timer) Update(duration time.Duration) {
|
||||
|
@ -60,9 +72,14 @@ func (t *timer) UpdateSince(since time.Time) {
|
|||
}
|
||||
|
||||
func (t *timer) Describe(c chan<- *prometheus.Desc) {
|
||||
t.m.Describe(c)
|
||||
c <- t.m.(prometheus.Metric).Desc()
|
||||
}
|
||||
|
||||
func (t *timer) Collect(c chan<- prometheus.Metric) {
|
||||
t.m.Collect(c)
|
||||
// Are there any observers that don't implement Collector? It is really
|
||||
// unclear what the point of the upstream change was, but we'll let this
|
||||
// panic if we get an observer that doesn't implement collector. In this
|
||||
// case, we should almost always see metricVec objects, so this should
|
||||
// never panic.
|
||||
t.m.(prometheus.Collector).Collect(c)
|
||||
}
|
||||
|
|
|
@ -1021,8 +1021,11 @@ type ContainerSpec struct {
|
|||
Sysctls map[string]string `protobuf:"bytes,26,rep,name=sysctls,proto3" json:"sysctls,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// CapabilityAdd sets the list of capabilities to add to the default capability list
|
||||
CapabilityAdd []string `protobuf:"bytes,27,rep,name=capability_add,json=capabilityAdd,proto3" json:"capability_add,omitempty"`
|
||||
// CapabilityAdd sets the list of capabilities to drop from the default capability list
|
||||
// CapabilityDrop sets the list of capabilities to drop from the default capability list
|
||||
CapabilityDrop []string `protobuf:"bytes,28,rep,name=capability_drop,json=capabilityDrop,proto3" json:"capability_drop,omitempty"`
|
||||
// Ulimits defines the list of ulimits to set in the container. This option
|
||||
// is equivalent to passing --ulimit to docker run.
|
||||
Ulimits []*ContainerSpec_Ulimit `protobuf:"bytes,29,rep,name=ulimits,proto3" json:"ulimits,omitempty"`
|
||||
}
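Aside (editor's sketch, not part of the generated code or the upstream commit): the new `Ulimits` field added to `ContainerSpec` above can be populated as shown below. The import alias and the nofile values are illustrative assumptions; the intent mirrors `docker run --ulimit nofile=1024:2048`.

```go
package main

import (
	api "github.com/docker/swarmkit/api"
)

func main() {
	// Set a single ulimit on the container spec, equivalent in spirit to
	// passing --ulimit nofile=1024:2048 to docker run.
	spec := api.ContainerSpec{
		Ulimits: []*api.ContainerSpec_Ulimit{
			{Name: "nofile", Soft: 1024, Hard: 2048},
		},
	}
	_ = spec
}
```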
|
||||
|
||||
func (m *ContainerSpec) Reset() { *m = ContainerSpec{} }
|
||||
|
@ -1143,6 +1146,44 @@ func (m *ContainerSpec_DNSConfig) XXX_DiscardUnknown() {
|
|||
|
||||
var xxx_messageInfo_ContainerSpec_DNSConfig proto.InternalMessageInfo
|
||||
|
||||
type ContainerSpec_Ulimit struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Soft int64 `protobuf:"varint,2,opt,name=soft,proto3" json:"soft,omitempty"`
|
||||
Hard int64 `protobuf:"varint,3,opt,name=hard,proto3" json:"hard,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ContainerSpec_Ulimit) Reset() { *m = ContainerSpec_Ulimit{} }
|
||||
func (*ContainerSpec_Ulimit) ProtoMessage() {}
|
||||
func (*ContainerSpec_Ulimit) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6589acc608f7d4fd, []int{10, 4}
|
||||
}
|
||||
func (m *ContainerSpec_Ulimit) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ContainerSpec_Ulimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ContainerSpec_Ulimit.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ContainerSpec_Ulimit) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ContainerSpec_Ulimit.Merge(m, src)
|
||||
}
|
||||
func (m *ContainerSpec_Ulimit) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ContainerSpec_Ulimit) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ContainerSpec_Ulimit.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ContainerSpec_Ulimit proto.InternalMessageInfo
|
||||
|
||||
// EndpointSpec defines the properties that can be configured to
|
||||
// access and loadbalance the service.
|
||||
type EndpointSpec struct {
|
||||
|
@ -1491,6 +1532,7 @@ func init() {
|
|||
proto.RegisterMapType((map[string]string)(nil), "docker.swarmkit.v1.ContainerSpec.SysctlsEntry")
|
||||
proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions")
|
||||
proto.RegisterType((*ContainerSpec_DNSConfig)(nil), "docker.swarmkit.v1.ContainerSpec.DNSConfig")
|
||||
proto.RegisterType((*ContainerSpec_Ulimit)(nil), "docker.swarmkit.v1.ContainerSpec.Ulimit")
|
||||
proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec")
|
||||
proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec")
|
||||
proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec")
|
||||
|
@ -1503,152 +1545,155 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_6589acc608f7d4fd = []byte{
|
||||
// 2311 bytes of a gzipped FileDescriptorProto
|
||||
// 2363 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x73, 0x1b, 0xc7,
|
||||
0xd1, 0x06, 0x48, 0x10, 0x1f, 0xbd, 0x00, 0x05, 0x8e, 0x65, 0x7b, 0x09, 0xc9, 0x20, 0x0c, 0xcb,
|
||||
0xd1, 0x06, 0x48, 0x10, 0x1f, 0xbd, 0x00, 0x05, 0x8e, 0x65, 0x7b, 0x09, 0x49, 0x20, 0x0c, 0xcb,
|
||||
0x36, 0x6d, 0xd7, 0x0b, 0xd6, 0xcb, 0xb8, 0x1c, 0x7f, 0xc4, 0x49, 0x00, 0x02, 0x96, 0x60, 0x49,
|
||||
0x14, 0x6a, 0x40, 0x2b, 0x51, 0x55, 0xaa, 0x50, 0x83, 0xdd, 0x21, 0xb0, 0xe1, 0x62, 0x67, 0x33,
|
||||
0x3b, 0xa0, 0x8d, 0x5b, 0x8e, 0x2e, 0xe6, 0x92, 0x3f, 0xc0, 0x53, 0x2a, 0xa7, 0x5c, 0x92, 0x4b,
|
||||
0x7e, 0x83, 0x8f, 0x3e, 0x3a, 0x17, 0x56, 0x4c, 0xff, 0x84, 0xdc, 0x72, 0x49, 0x6a, 0x66, 0x67,
|
||||
0x17, 0x0b, 0x0a, 0x10, 0x95, 0x8a, 0x0e, 0xb9, 0xcd, 0xf4, 0x3e, 0x4f, 0x4f, 0xf7, 0x4c, 0x77,
|
||||
0x4f, 0xcf, 0xc2, 0xbb, 0x23, 0x47, 0x8c, 0xa7, 0xc3, 0x86, 0xc5, 0x26, 0x7b, 0x36, 0xb3, 0x4e,
|
||||
0x14, 0x6a, 0x40, 0x29, 0x51, 0x55, 0xaa, 0x50, 0x83, 0xdd, 0x21, 0xb0, 0xe1, 0x62, 0x67, 0x33,
|
||||
0x3b, 0xa0, 0x8d, 0x5b, 0x8e, 0x2e, 0xe5, 0x92, 0x3f, 0xc0, 0x53, 0x2a, 0xa7, 0x5c, 0x92, 0x7f,
|
||||
0x90, 0xa3, 0x8f, 0x3e, 0x3a, 0x17, 0x56, 0x4c, 0xff, 0x84, 0xdc, 0x72, 0x49, 0x6a, 0x66, 0x67,
|
||||
0x17, 0x0b, 0x0a, 0x10, 0x95, 0x8a, 0x0e, 0xb9, 0xcd, 0xf4, 0x3e, 0x4f, 0x4f, 0xcf, 0x4c, 0x77,
|
||||
0x4f, 0xf7, 0xc2, 0x7b, 0x23, 0x47, 0x8c, 0xa7, 0xc3, 0x86, 0xc5, 0x26, 0x7b, 0x36, 0xb3, 0x4e,
|
||||
0x28, 0xdf, 0x0b, 0xbe, 0x24, 0x7c, 0x72, 0xe2, 0x88, 0x3d, 0xe2, 0x3b, 0x7b, 0x81, 0x4f, 0xad,
|
||||
0xa0, 0xe1, 0x73, 0x26, 0x18, 0x42, 0x21, 0xa0, 0x11, 0x01, 0x1a, 0xa7, 0xff, 0x5f, 0xb9, 0x8e,
|
||||
0x2f, 0x66, 0x3e, 0xd5, 0xfc, 0xca, 0xcd, 0x11, 0x1b, 0x31, 0x35, 0xdc, 0x93, 0x23, 0x2d, 0xad,
|
||||
0xa0, 0xe1, 0x73, 0x26, 0x18, 0x42, 0x21, 0xa0, 0x11, 0x01, 0x1a, 0xa7, 0xff, 0x5f, 0xb9, 0x8a,
|
||||
0x2f, 0x66, 0x3e, 0xd5, 0xfc, 0xca, 0xf5, 0x11, 0x1b, 0x31, 0x35, 0xdc, 0x93, 0x23, 0x2d, 0xad,
|
||||
0x8e, 0x18, 0x1b, 0xb9, 0x74, 0x4f, 0xcd, 0x86, 0xd3, 0xe3, 0x3d, 0x7b, 0xca, 0x89, 0x70, 0x98,
|
||||
0xa7, 0xbf, 0x6f, 0x5f, 0xfd, 0x4e, 0xbc, 0xd9, 0x2a, 0xea, 0x97, 0x9c, 0xf8, 0x3e, 0xe5, 0x7a,
|
||||
0xc1, 0xfa, 0x79, 0x06, 0xf2, 0x87, 0xcc, 0xa6, 0x7d, 0x9f, 0x5a, 0xe8, 0x2e, 0x18, 0xc4, 0xf3,
|
||||
0x98, 0x50, 0xba, 0x03, 0x33, 0x5d, 0x4b, 0xef, 0x1a, 0xfb, 0x3b, 0x8d, 0xa7, 0x7d, 0x6a, 0x34,
|
||||
0xe7, 0xb0, 0x56, 0xe6, 0x9b, 0x8b, 0x9d, 0x14, 0x4e, 0x32, 0xd1, 0xcf, 0xa0, 0x68, 0xd3, 0xc0,
|
||||
0xe1, 0xd4, 0x1e, 0x70, 0xe6, 0x52, 0x73, 0xad, 0x96, 0xde, 0xdd, 0xdc, 0xbf, 0xbd, 0x4c, 0x93,
|
||||
0x5c, 0x1c, 0x33, 0x97, 0x62, 0x43, 0x33, 0xe4, 0x04, 0xdd, 0x05, 0x98, 0xd0, 0xc9, 0x90, 0xf2,
|
||||
0x60, 0xec, 0xf8, 0xe6, 0xba, 0xa2, 0xbf, 0xbd, 0x8a, 0x2e, 0x6d, 0x6f, 0x3c, 0x8c, 0xe1, 0x38,
|
||||
0x41, 0x45, 0x0f, 0xa1, 0x48, 0x4e, 0x89, 0xe3, 0x92, 0xa1, 0xe3, 0x3a, 0x62, 0x66, 0x66, 0x94,
|
||||
0xaa, 0x77, 0x9e, 0xa9, 0xaa, 0x99, 0x20, 0xe0, 0x05, 0x7a, 0xdd, 0x06, 0x98, 0x2f, 0x84, 0xde,
|
||||
0x82, 0x5c, 0xaf, 0x73, 0xd8, 0xee, 0x1e, 0xde, 0x2d, 0xa7, 0x2a, 0xdb, 0x67, 0xe7, 0xb5, 0x97,
|
||||
0xa5, 0x8e, 0x39, 0xa0, 0x47, 0x3d, 0xdb, 0xf1, 0x46, 0x68, 0x17, 0xf2, 0xcd, 0x83, 0x83, 0x4e,
|
||||
0xef, 0xa8, 0xd3, 0x2e, 0xa7, 0x2b, 0x95, 0xb3, 0xf3, 0xda, 0x2b, 0x8b, 0xc0, 0xa6, 0x65, 0x51,
|
||||
0x5f, 0x50, 0xbb, 0x92, 0xf9, 0xfa, 0x0f, 0xd5, 0x54, 0xfd, 0xeb, 0x34, 0x14, 0x93, 0x46, 0xa0,
|
||||
0xb7, 0x20, 0xdb, 0x3c, 0x38, 0xea, 0x3e, 0xee, 0x94, 0x53, 0x73, 0x7a, 0x12, 0xd1, 0xb4, 0x84,
|
||||
0x73, 0x4a, 0xd1, 0x1d, 0xd8, 0xe8, 0x35, 0xbf, 0xe8, 0x77, 0xca, 0xe9, 0xb9, 0x39, 0x49, 0x58,
|
||||
0x8f, 0x4c, 0x03, 0x85, 0x6a, 0xe3, 0x66, 0xf7, 0xb0, 0xbc, 0xb6, 0x1c, 0xd5, 0xe6, 0xc4, 0xf1,
|
||||
0xb4, 0x29, 0x7f, 0xda, 0x00, 0xa3, 0x4f, 0xf9, 0xa9, 0x63, 0xbd, 0xe0, 0x10, 0xf9, 0x00, 0x32,
|
||||
0x82, 0x04, 0x27, 0x2a, 0x34, 0x8c, 0xe5, 0xa1, 0x71, 0x44, 0x82, 0x13, 0xb9, 0xa8, 0xa6, 0x2b,
|
||||
0xbc, 0x8c, 0x0c, 0x4e, 0x7d, 0xd7, 0xb1, 0x88, 0xa0, 0xb6, 0x8a, 0x0c, 0x63, 0xff, 0xcd, 0x65,
|
||||
0x6c, 0x1c, 0xa3, 0xb4, 0xfd, 0xf7, 0x52, 0x38, 0x41, 0x45, 0x9f, 0x40, 0x76, 0xe4, 0xb2, 0x21,
|
||||
0x71, 0x55, 0x4c, 0x18, 0xfb, 0xaf, 0x2f, 0x53, 0x72, 0x57, 0x21, 0xe6, 0x0a, 0x34, 0x05, 0x7d,
|
||||
0x0e, 0x9b, 0x73, 0x55, 0x83, 0x5f, 0xb3, 0xa1, 0x09, 0xab, 0x95, 0xcc, 0x2d, 0xf9, 0x9c, 0x0d,
|
||||
0xef, 0xa5, 0x70, 0x89, 0x27, 0x05, 0xe8, 0xa7, 0x00, 0xa1, 0x56, 0xa5, 0xc7, 0x50, 0x7a, 0x5e,
|
||||
0x5b, 0x6d, 0x4c, 0xa8, 0xa3, 0x30, 0x8a, 0x26, 0xe8, 0x43, 0xc8, 0x4e, 0x7d, 0x9b, 0x08, 0x6a,
|
||||
0x66, 0x15, 0xb7, 0xb6, 0x8c, 0xfb, 0x85, 0x42, 0x1c, 0x30, 0xef, 0xd8, 0x19, 0x61, 0x8d, 0x47,
|
||||
0x3f, 0x81, 0x3c, 0x67, 0xae, 0x3b, 0x24, 0xd6, 0x89, 0x59, 0x78, 0x4e, 0x6e, 0xcc, 0x40, 0xf7,
|
||||
0x21, 0xef, 0x51, 0xf1, 0x25, 0xe3, 0x27, 0x81, 0x99, 0xab, 0xad, 0xef, 0x1a, 0xfb, 0xef, 0x2d,
|
||||
0x4d, 0xab, 0x10, 0xd3, 0x14, 0x82, 0x58, 0xe3, 0x09, 0xf5, 0x44, 0xa8, 0xa8, 0xb5, 0x66, 0xa6,
|
||||
0xa7, 0xbf, 0x6f, 0x5f, 0xfe, 0x4e, 0xbc, 0xd9, 0x2a, 0xea, 0x97, 0x9c, 0xf8, 0x3e, 0xe5, 0x7a,
|
||||
0xc1, 0xfa, 0x59, 0x06, 0xf2, 0x87, 0xcc, 0xa6, 0x7d, 0x9f, 0x5a, 0xe8, 0x0e, 0x18, 0xc4, 0xf3,
|
||||
0x98, 0x50, 0xba, 0x03, 0x33, 0x5d, 0x4b, 0xef, 0x1a, 0xfb, 0x3b, 0x8d, 0x67, 0xf7, 0xd4, 0x68,
|
||||
0xce, 0x61, 0xad, 0xcc, 0x37, 0xe7, 0x3b, 0x29, 0x9c, 0x64, 0xa2, 0x9f, 0x41, 0xd1, 0xa6, 0x81,
|
||||
0xc3, 0xa9, 0x3d, 0xe0, 0xcc, 0xa5, 0xe6, 0x5a, 0x2d, 0xbd, 0xbb, 0xb9, 0x7f, 0x73, 0x99, 0x26,
|
||||
0xb9, 0x38, 0x66, 0x2e, 0xc5, 0x86, 0x66, 0xc8, 0x09, 0xba, 0x03, 0x30, 0xa1, 0x93, 0x21, 0xe5,
|
||||
0xc1, 0xd8, 0xf1, 0xcd, 0x75, 0x45, 0x7f, 0x67, 0x15, 0x5d, 0xda, 0xde, 0x78, 0x10, 0xc3, 0x71,
|
||||
0x82, 0x8a, 0x1e, 0x40, 0x91, 0x9c, 0x12, 0xc7, 0x25, 0x43, 0xc7, 0x75, 0xc4, 0xcc, 0xcc, 0x28,
|
||||
0x55, 0xef, 0x3e, 0x57, 0x55, 0x33, 0x41, 0xc0, 0x0b, 0xf4, 0xba, 0x0d, 0x30, 0x5f, 0x08, 0xbd,
|
||||
0x0d, 0xb9, 0x5e, 0xe7, 0xb0, 0xdd, 0x3d, 0xbc, 0x53, 0x4e, 0x55, 0xb6, 0x9f, 0x9e, 0xd5, 0x5e,
|
||||
0x95, 0x3a, 0xe6, 0x80, 0x1e, 0xf5, 0x6c, 0xc7, 0x1b, 0xa1, 0x5d, 0xc8, 0x37, 0x0f, 0x0e, 0x3a,
|
||||
0xbd, 0xa3, 0x4e, 0xbb, 0x9c, 0xae, 0x54, 0x9e, 0x9e, 0xd5, 0x5e, 0x5b, 0x04, 0x36, 0x2d, 0x8b,
|
||||
0xfa, 0x82, 0xda, 0x95, 0xcc, 0xd7, 0x7f, 0xa8, 0xa6, 0xea, 0x5f, 0xa7, 0xa1, 0x98, 0x34, 0x02,
|
||||
0xbd, 0x0d, 0xd9, 0xe6, 0xc1, 0x51, 0xf7, 0x71, 0xa7, 0x9c, 0x9a, 0xd3, 0x93, 0x88, 0xa6, 0x25,
|
||||
0x9c, 0x53, 0x8a, 0x6e, 0xc3, 0x46, 0xaf, 0xf9, 0xa8, 0xdf, 0x29, 0xa7, 0xe7, 0xe6, 0x24, 0x61,
|
||||
0x3d, 0x32, 0x0d, 0x14, 0xaa, 0x8d, 0x9b, 0xdd, 0xc3, 0xf2, 0xda, 0x72, 0x54, 0x9b, 0x13, 0xc7,
|
||||
0xd3, 0xa6, 0xfc, 0x69, 0x03, 0x8c, 0x3e, 0xe5, 0xa7, 0x8e, 0xf5, 0x92, 0x5d, 0xe4, 0x43, 0xc8,
|
||||
0x08, 0x12, 0x9c, 0x28, 0xd7, 0x30, 0x96, 0xbb, 0xc6, 0x11, 0x09, 0x4e, 0xe4, 0xa2, 0x9a, 0xae,
|
||||
0xf0, 0xd2, 0x33, 0x38, 0xf5, 0x5d, 0xc7, 0x22, 0x82, 0xda, 0xca, 0x33, 0x8c, 0xfd, 0xb7, 0x96,
|
||||
0xb1, 0x71, 0x8c, 0xd2, 0xf6, 0xdf, 0x4d, 0xe1, 0x04, 0x15, 0x7d, 0x0a, 0xd9, 0x91, 0xcb, 0x86,
|
||||
0xc4, 0x55, 0x3e, 0x61, 0xec, 0xbf, 0xb1, 0x4c, 0xc9, 0x1d, 0x85, 0x98, 0x2b, 0xd0, 0x14, 0xf4,
|
||||
0x05, 0x6c, 0xce, 0x55, 0x0d, 0x7e, 0xcd, 0x86, 0x26, 0xac, 0x56, 0x32, 0xb7, 0xe4, 0x0b, 0x36,
|
||||
0xbc, 0x9b, 0xc2, 0x25, 0x9e, 0x14, 0xa0, 0x9f, 0x02, 0x84, 0x5a, 0x95, 0x1e, 0x43, 0xe9, 0xb9,
|
||||
0xb5, 0xda, 0x98, 0x50, 0x47, 0x61, 0x14, 0x4d, 0xd0, 0x47, 0x90, 0x9d, 0xfa, 0x36, 0x11, 0xd4,
|
||||
0xcc, 0x2a, 0x6e, 0x6d, 0x19, 0xf7, 0x91, 0x42, 0x1c, 0x30, 0xef, 0xd8, 0x19, 0x61, 0x8d, 0x47,
|
||||
0x3f, 0x81, 0x3c, 0x67, 0xae, 0x3b, 0x24, 0xd6, 0x89, 0x59, 0x78, 0x41, 0x6e, 0xcc, 0x40, 0xf7,
|
||||
0x20, 0xef, 0x51, 0xf1, 0x25, 0xe3, 0x27, 0x81, 0x99, 0xab, 0xad, 0xef, 0x1a, 0xfb, 0xef, 0x2f,
|
||||
0x0d, 0xab, 0x10, 0xd3, 0x14, 0x82, 0x58, 0xe3, 0x09, 0xf5, 0x44, 0xa8, 0xa8, 0xb5, 0x66, 0xa6,
|
||||
0x71, 0xac, 0x40, 0x9a, 0x42, 0x3d, 0xdb, 0x67, 0x8e, 0x27, 0xcc, 0xfc, 0x6a, 0x53, 0x3a, 0x1a,
|
||||
0x23, 0xc3, 0x02, 0xc7, 0x8c, 0x56, 0x16, 0x32, 0x13, 0x66, 0xd3, 0xfa, 0x1e, 0x6c, 0x3d, 0x75,
|
||||
0xec, 0xa8, 0x02, 0x79, 0xbd, 0xe1, 0x61, 0xbc, 0x66, 0x70, 0x3c, 0xaf, 0xdf, 0x80, 0xd2, 0xc2,
|
||||
0x11, 0xd7, 0x2d, 0x28, 0x2d, 0x1c, 0x17, 0x7a, 0x13, 0x36, 0x27, 0xe4, 0xab, 0x81, 0xc5, 0x3c,
|
||||
0x6b, 0xca, 0x39, 0xf5, 0x84, 0xd6, 0x51, 0x9a, 0x90, 0xaf, 0x0e, 0x62, 0x21, 0x7a, 0x0f, 0xb6,
|
||||
0x04, 0x13, 0xc4, 0x1d, 0x58, 0x6c, 0xe2, 0xbb, 0x34, 0xcc, 0x8e, 0x35, 0x85, 0x2c, 0xab, 0x0f,
|
||||
0x07, 0x73, 0x79, 0xdd, 0x80, 0x42, 0x7c, 0x96, 0xf5, 0x3f, 0x6f, 0x40, 0x3e, 0x8a, 0x74, 0x74,
|
||||
0x1f, 0x80, 0xc4, 0x1b, 0xa5, 0x37, 0xe2, 0x9d, 0xe7, 0xda, 0x55, 0x49, 0x97, 0x11, 0x3e, 0xa7,
|
||||
0xa3, 0x26, 0x14, 0x2c, 0xe6, 0x09, 0xe2, 0x78, 0x94, 0xeb, 0x4c, 0x5d, 0x1a, 0x9f, 0x07, 0x11,
|
||||
0x48, 0xeb, 0x98, 0xb3, 0x50, 0x0b, 0x72, 0x23, 0xea, 0x51, 0xee, 0x58, 0x3a, 0xc0, 0xdf, 0x5a,
|
||||
0x1a, 0x98, 0x21, 0x04, 0x4f, 0x3d, 0xe1, 0x4c, 0xa8, 0xd6, 0x12, 0x11, 0xd1, 0x67, 0x50, 0xe0,
|
||||
0x34, 0x60, 0x53, 0x6e, 0xd1, 0x40, 0xa7, 0xfb, 0xee, 0xf2, 0x34, 0x09, 0x41, 0x98, 0xfe, 0x66,
|
||||
0xea, 0x70, 0x2a, 0x5d, 0x08, 0xf0, 0x9c, 0x8a, 0x3e, 0x81, 0x1c, 0xa7, 0x81, 0x20, 0x5c, 0x3c,
|
||||
0x2b, 0x63, 0x71, 0x08, 0xe9, 0x31, 0xd7, 0xb1, 0x66, 0x38, 0x62, 0xa0, 0x4f, 0xa0, 0xe0, 0xbb,
|
||||
0xc4, 0x52, 0x5a, 0xcd, 0x8d, 0xd5, 0x39, 0xd6, 0x8b, 0x40, 0x78, 0x8e, 0x47, 0x1f, 0x01, 0xb8,
|
||||
0x6c, 0x34, 0xb0, 0xb9, 0x73, 0x4a, 0xb9, 0xce, 0xb2, 0xca, 0x32, 0x76, 0x5b, 0x21, 0x70, 0xc1,
|
||||
0x65, 0xa3, 0x70, 0x88, 0xee, 0xfe, 0x57, 0x49, 0x92, 0x48, 0x90, 0xd7, 0xa1, 0x78, 0xcc, 0xb8,
|
||||
0x45, 0x07, 0x3a, 0xd7, 0x0b, 0x2a, 0xb6, 0x0c, 0x25, 0x0b, 0x13, 0x14, 0xfd, 0x0a, 0x5e, 0x8a,
|
||||
0x76, 0x6b, 0xc0, 0xe9, 0x31, 0xe5, 0xd4, 0x93, 0x5b, 0x6e, 0xa8, 0x65, 0xdf, 0x7c, 0xf6, 0x96,
|
||||
0x6b, 0xb4, 0x2e, 0xb5, 0x88, 0x5f, 0xfd, 0x10, 0xb4, 0x0a, 0x90, 0xe3, 0xe1, 0x01, 0xd7, 0x7f,
|
||||
0x97, 0x96, 0x79, 0x76, 0x05, 0x81, 0xf6, 0xc0, 0x88, 0x97, 0x77, 0x6c, 0x15, 0x70, 0x85, 0xd6,
|
||||
0xe6, 0xe5, 0xc5, 0x0e, 0x44, 0xd8, 0x6e, 0x5b, 0x56, 0x60, 0x3d, 0xb6, 0x51, 0x07, 0x4a, 0x31,
|
||||
0x41, 0x36, 0x41, 0xba, 0x4d, 0xa8, 0x3d, 0xcb, 0xd2, 0xa3, 0x99, 0x4f, 0x71, 0x91, 0x27, 0x66,
|
||||
0xf5, 0x5f, 0x02, 0x7a, 0x3a, 0x00, 0x11, 0x82, 0xcc, 0x89, 0xe3, 0x69, 0x33, 0xb0, 0x1a, 0xa3,
|
||||
0x06, 0xe4, 0x7c, 0x32, 0x73, 0x19, 0xb1, 0x75, 0x1c, 0xde, 0x6c, 0x84, 0xed, 0x51, 0x23, 0x6a,
|
||||
0x8f, 0x1a, 0x4d, 0x6f, 0x86, 0x23, 0x50, 0xfd, 0x3e, 0xbc, 0xbc, 0x34, 0xcf, 0xd0, 0x3e, 0x14,
|
||||
0xe3, 0x1c, 0x99, 0xfb, 0x7a, 0xe3, 0xf2, 0x62, 0xc7, 0x88, 0x93, 0xa9, 0xdb, 0xc6, 0x46, 0x0c,
|
||||
0xea, 0xda, 0xf5, 0xbf, 0x96, 0xa0, 0xb4, 0x90, 0x69, 0xe8, 0x26, 0x6c, 0x38, 0x13, 0x32, 0xa2,
|
||||
0xda, 0xc6, 0x70, 0x82, 0x3a, 0x90, 0x75, 0xc9, 0x90, 0xba, 0x32, 0x57, 0xe4, 0xc1, 0xfd, 0xdf,
|
||||
0xb5, 0x29, 0xdb, 0x78, 0xa0, 0xf0, 0x1d, 0x4f, 0xf0, 0x19, 0xd6, 0x64, 0x64, 0x42, 0xce, 0x62,
|
||||
0x93, 0x09, 0xf1, 0xe4, 0x25, 0xb9, 0xbe, 0x5b, 0xc0, 0xd1, 0x54, 0xee, 0x0c, 0xe1, 0xa3, 0xc0,
|
||||
0xcc, 0x28, 0xb1, 0x1a, 0xcb, 0x1a, 0x39, 0x66, 0x81, 0xf0, 0xc8, 0x84, 0x9a, 0x9b, 0xca, 0x9a,
|
||||
0x78, 0x8e, 0xca, 0xb0, 0x4e, 0xbd, 0x53, 0x73, 0x43, 0xc1, 0xe5, 0x50, 0x4a, 0x6c, 0x27, 0x4c,
|
||||
0x84, 0x02, 0x96, 0x43, 0xa9, 0x73, 0x1a, 0x50, 0x6e, 0xe6, 0xc2, 0xdd, 0x96, 0x63, 0xf4, 0x0a,
|
||||
0x64, 0x47, 0x9c, 0x4d, 0xfd, 0x30, 0x02, 0x0b, 0x58, 0xcf, 0xe4, 0x7d, 0xe7, 0x73, 0xe7, 0xd4,
|
||||
0x71, 0xe9, 0x88, 0x06, 0xe6, 0x2b, 0xea, 0x20, 0xaa, 0x4b, 0x73, 0x31, 0x46, 0xe1, 0x04, 0x03,
|
||||
0x35, 0x20, 0xe3, 0x78, 0x8e, 0x30, 0x5f, 0xd5, 0x79, 0x78, 0xf5, 0x08, 0x5b, 0x8c, 0xb9, 0x8f,
|
||||
0x89, 0x3b, 0xa5, 0x58, 0xe1, 0xd0, 0x36, 0xac, 0x0b, 0x31, 0x33, 0x4b, 0xb5, 0xf4, 0x6e, 0xbe,
|
||||
0x95, 0xbb, 0xbc, 0xd8, 0x59, 0x3f, 0x3a, 0x7a, 0x82, 0xa5, 0x0c, 0xbd, 0x06, 0xc0, 0x7c, 0xea,
|
||||
0x0d, 0x02, 0x61, 0x3b, 0x9e, 0x89, 0x24, 0x02, 0x17, 0xa4, 0xa4, 0x2f, 0x05, 0xe8, 0x96, 0xac,
|
||||
0x5c, 0xc4, 0x1e, 0x30, 0xcf, 0x9d, 0x99, 0x2f, 0xa9, 0xaf, 0x79, 0x29, 0x78, 0xe4, 0xb9, 0x33,
|
||||
0xb4, 0x03, 0x46, 0x20, 0x98, 0x3f, 0x08, 0x9c, 0x91, 0x47, 0x5c, 0xf3, 0xa6, 0xf2, 0x1c, 0xa4,
|
||||
0xa8, 0xaf, 0x24, 0xe8, 0xc7, 0x90, 0x9d, 0xb0, 0xa9, 0x27, 0x02, 0x33, 0xaf, 0x0e, 0x72, 0x7b,
|
||||
0x99, 0x8f, 0x0f, 0x25, 0x42, 0x67, 0x9d, 0x86, 0xa3, 0x0e, 0x6c, 0x29, 0xcd, 0x23, 0x4e, 0x2c,
|
||||
0x3a, 0xf0, 0x29, 0x77, 0x98, 0xad, 0xef, 0xe7, 0xed, 0xa7, 0xbc, 0x6d, 0xeb, 0xa7, 0x00, 0xbe,
|
||||
0x21, 0x39, 0x77, 0x25, 0xa5, 0xa7, 0x18, 0xa8, 0x07, 0x45, 0x7f, 0xea, 0xba, 0x03, 0xe6, 0x87,
|
||||
0xb7, 0x51, 0x58, 0xc0, 0x9f, 0x23, 0x9c, 0x7a, 0x53, 0xd7, 0x7d, 0x14, 0x92, 0xb0, 0xe1, 0xcf,
|
||||
0x27, 0xe8, 0x53, 0xc8, 0x05, 0xd4, 0xe2, 0x54, 0x04, 0x66, 0x51, 0xb9, 0xf4, 0xc6, 0x32, 0x65,
|
||||
0x7d, 0x05, 0x89, 0xeb, 0x02, 0x8e, 0x38, 0x92, 0x6e, 0xa9, 0xb2, 0x16, 0x98, 0x2f, 0xaf, 0xa6,
|
||||
0xeb, 0xca, 0x37, 0xa7, 0x6b, 0x8e, 0x4c, 0x17, 0x19, 0x93, 0x81, 0xb9, 0xa5, 0xc2, 0x29, 0x9c,
|
||||
0xa0, 0x27, 0x00, 0xb6, 0x17, 0x0c, 0x42, 0x90, 0x79, 0x43, 0xf9, 0xf8, 0xde, 0xf5, 0x3e, 0xb6,
|
||||
0x0f, 0xfb, 0xba, 0x0f, 0x29, 0x5d, 0x5e, 0xec, 0x14, 0xe2, 0x29, 0x2e, 0xd8, 0x5e, 0x10, 0x0e,
|
||||
0x51, 0x0b, 0x8c, 0x31, 0x25, 0xae, 0x18, 0x5b, 0x63, 0x6a, 0x9d, 0x98, 0xe5, 0xd5, 0x6d, 0xc9,
|
||||
0x3d, 0x05, 0xd3, 0x1a, 0x92, 0x24, 0xd4, 0x85, 0x82, 0x13, 0x30, 0x57, 0x1d, 0x91, 0x69, 0xaa,
|
||||
0xfa, 0xf6, 0x1c, 0xd6, 0x75, 0x23, 0x0a, 0x9e, 0xb3, 0xd1, 0x6d, 0x28, 0xf8, 0x8e, 0x1d, 0x3c,
|
||||
0x70, 0x26, 0x8e, 0x30, 0xb7, 0x6b, 0xe9, 0xdd, 0x75, 0x3c, 0x17, 0xa0, 0x7b, 0x90, 0x0b, 0x66,
|
||||
0x81, 0x25, 0xdc, 0xc0, 0xac, 0xa8, 0xcd, 0x6d, 0x5c, 0xbf, 0x4c, 0x3f, 0x24, 0x84, 0x85, 0x23,
|
||||
0xa2, 0xcb, 0x8e, 0xc7, 0x22, 0xbe, 0x7e, 0x0b, 0x0c, 0x88, 0x6d, 0x9b, 0xb7, 0xd4, 0x86, 0x97,
|
||||
0xe6, 0xd2, 0xa6, 0x6d, 0xa3, 0xb7, 0xe1, 0x46, 0x02, 0x66, 0x73, 0xe6, 0x9b, 0xb7, 0x15, 0x2e,
|
||||
0xc1, 0x6e, 0x73, 0xe6, 0x57, 0x3e, 0x02, 0x23, 0x51, 0xa0, 0x64, 0xf1, 0x38, 0xa1, 0x33, 0x5d,
|
||||
0xf3, 0xe4, 0x50, 0x1e, 0xec, 0xa9, 0xcc, 0x57, 0x55, 0x94, 0x0b, 0x38, 0x9c, 0x7c, 0xbc, 0xf6,
|
||||
0x61, 0xba, 0xb2, 0x0f, 0x46, 0x22, 0x18, 0xd1, 0x1b, 0xf2, 0xc2, 0x18, 0x39, 0x81, 0xe0, 0xb3,
|
||||
0x01, 0x99, 0x8a, 0xb1, 0xf9, 0x73, 0x45, 0x28, 0x46, 0xc2, 0xe6, 0x54, 0x8c, 0x2b, 0x03, 0x98,
|
||||
0x9f, 0x26, 0xaa, 0x81, 0x21, 0x6b, 0x58, 0x40, 0xf9, 0x29, 0xe5, 0xb2, 0xfd, 0x93, 0x06, 0x26,
|
||||
0x45, 0xb2, 0x4a, 0x05, 0x94, 0x70, 0x6b, 0xac, 0xca, 0x6d, 0x01, 0xeb, 0x99, 0xac, 0x9f, 0x51,
|
||||
0xe2, 0xe8, 0xfa, 0xa9, 0xa7, 0x95, 0x8f, 0xa1, 0x98, 0xdc, 0xb8, 0xff, 0xc4, 0xa1, 0xfa, 0x5f,
|
||||
0xd2, 0x50, 0x88, 0x0f, 0x17, 0xbd, 0x0f, 0x5b, 0xdd, 0xfe, 0xa3, 0x07, 0xcd, 0xa3, 0xee, 0xa3,
|
||||
0xc3, 0x41, 0xbb, 0xf3, 0x59, 0xf3, 0x8b, 0x07, 0x47, 0xe5, 0x54, 0xe5, 0xb5, 0xb3, 0xf3, 0xda,
|
||||
0xf6, 0xfc, 0x1e, 0x89, 0xe0, 0x6d, 0x7a, 0x4c, 0xa6, 0xae, 0x58, 0x64, 0xf5, 0xf0, 0xa3, 0x83,
|
||||
0x4e, 0xbf, 0x5f, 0x4e, 0xaf, 0x62, 0xf5, 0x38, 0xb3, 0x68, 0x10, 0xa0, 0x7d, 0x28, 0xcf, 0x59,
|
||||
0xf7, 0x9e, 0xf4, 0x3a, 0xf8, 0x71, 0x79, 0xad, 0x72, 0xfb, 0xec, 0xbc, 0x66, 0x3e, 0x4d, 0xba,
|
||||
0x37, 0xf3, 0x29, 0x7f, 0xac, 0x9f, 0x80, 0xff, 0x48, 0x43, 0x31, 0xd9, 0x77, 0xa3, 0x83, 0xb0,
|
||||
0xdb, 0x56, 0x1e, 0x6f, 0xee, 0xef, 0x5d, 0xd7, 0xa7, 0xab, 0xbb, 0xdb, 0x9d, 0x4a, 0xbd, 0x0f,
|
||||
0xe5, 0x63, 0x5f, 0x91, 0xd1, 0xfb, 0xb0, 0xe1, 0x33, 0x2e, 0xa2, 0x5b, 0x6e, 0xf9, 0x05, 0xc0,
|
||||
0x78, 0xd4, 0x08, 0x85, 0xe0, 0xfa, 0x18, 0x36, 0x17, 0xb5, 0xa1, 0x3b, 0xb0, 0xfe, 0xb8, 0xdb,
|
||||
0x2b, 0xa7, 0x2a, 0xb7, 0xce, 0xce, 0x6b, 0xaf, 0x2e, 0x7e, 0x7c, 0xec, 0x70, 0x31, 0x25, 0x6e,
|
||||
0xb7, 0x87, 0xde, 0x85, 0x8d, 0xf6, 0x61, 0x1f, 0xe3, 0x72, 0xba, 0xb2, 0x73, 0x76, 0x5e, 0xbb,
|
||||
0xb5, 0x88, 0x93, 0x9f, 0xd8, 0xd4, 0xb3, 0x31, 0x1b, 0xc6, 0x0f, 0xdf, 0x7f, 0xae, 0x81, 0xa1,
|
||||
0x2f, 0xff, 0x17, 0xfd, 0x6f, 0xa4, 0x14, 0x36, 0x92, 0x51, 0xcd, 0x5a, 0xbb, 0xb6, 0x9f, 0x2c,
|
||||
0x86, 0x04, 0x1d, 0xd3, 0xaf, 0x43, 0xd1, 0xf1, 0x4f, 0x3f, 0x18, 0x50, 0x8f, 0x0c, 0x5d, 0xfd,
|
||||
0x06, 0xce, 0x63, 0x43, 0xca, 0x3a, 0xa1, 0x48, 0x5e, 0xe7, 0x8e, 0x27, 0x28, 0xf7, 0xf4, 0xeb,
|
||||
0x36, 0x8f, 0xe3, 0x39, 0xfa, 0x14, 0x32, 0x8e, 0x4f, 0x26, 0xba, 0x09, 0x5e, 0xea, 0x41, 0xb7,
|
||||
0xd7, 0x7c, 0xa8, 0x73, 0xae, 0x95, 0xbf, 0xbc, 0xd8, 0xc9, 0x48, 0x01, 0x56, 0x34, 0x54, 0x8d,
|
||||
0x5e, 0x28, 0x72, 0x25, 0xd5, 0x02, 0xe4, 0x71, 0x42, 0x22, 0xf3, 0xc6, 0xf1, 0x46, 0x9c, 0x06,
|
||||
0x81, 0x6a, 0x06, 0xf2, 0x38, 0x9a, 0xa2, 0x0a, 0xe4, 0x74, 0x37, 0xab, 0x1e, 0x36, 0x05, 0xf9,
|
||||
0x46, 0xd0, 0x82, 0x56, 0x09, 0x8c, 0x70, 0x37, 0x06, 0xc7, 0x9c, 0x4d, 0xea, 0xff, 0xca, 0x80,
|
||||
0x71, 0xe0, 0x4e, 0x03, 0xa1, 0x3b, 0xa5, 0x17, 0xb6, 0xf9, 0x4f, 0x60, 0x8b, 0xa8, 0x7f, 0x2d,
|
||||
0xc4, 0x93, 0x57, 0xab, 0x7a, 0x24, 0xe8, 0x03, 0xb8, 0xb3, 0x54, 0x5d, 0x0c, 0x0e, 0x1f, 0x14,
|
||||
0xad, 0xac, 0xd4, 0x69, 0xa6, 0x71, 0x99, 0x5c, 0xf9, 0x82, 0xfa, 0x50, 0x62, 0xdc, 0x1a, 0xd3,
|
||||
0x40, 0x84, 0x17, 0xb2, 0xfe, 0x37, 0xb1, 0xf4, 0xaf, 0xd5, 0xa3, 0x24, 0x50, 0xdf, 0x43, 0xa1,
|
||||
0xb5, 0x8b, 0x3a, 0xd0, 0x87, 0x90, 0xe1, 0xe4, 0x38, 0x7a, 0xf0, 0x2c, 0x4d, 0x12, 0x4c, 0x8e,
|
||||
0xc5, 0x82, 0x0a, 0xc5, 0x40, 0x9f, 0x03, 0xd8, 0x4e, 0xe0, 0x13, 0x61, 0x8d, 0x29, 0xd7, 0x87,
|
||||
0xbd, 0xd4, 0xc5, 0x76, 0x8c, 0x5a, 0xd0, 0x92, 0x60, 0xa3, 0xfb, 0x50, 0xb0, 0x48, 0x14, 0xae,
|
||||
0xd9, 0xd5, 0x3f, 0x6c, 0x0e, 0x9a, 0x5a, 0x45, 0x59, 0xaa, 0xb8, 0xbc, 0xd8, 0xc9, 0x47, 0x12,
|
||||
0x9c, 0xb7, 0x88, 0x0e, 0xdf, 0xfb, 0x50, 0x12, 0x24, 0x38, 0x19, 0xd8, 0x61, 0x39, 0x0b, 0xc3,
|
||||
0x64, 0xc5, 0xbd, 0x2a, 0xdf, 0xc5, 0xba, 0xec, 0x45, 0xc7, 0x59, 0x14, 0x09, 0x19, 0xfa, 0x05,
|
||||
0x6c, 0x51, 0xcf, 0xe2, 0x33, 0x15, 0xac, 0x91, 0x85, 0xf9, 0xd5, 0xce, 0x76, 0x62, 0xf0, 0x82,
|
||||
0xb3, 0x65, 0x7a, 0x45, 0x5e, 0xff, 0x5b, 0x1a, 0x20, 0x6c, 0x64, 0x5e, 0x6c, 0x00, 0x22, 0xc8,
|
||||
0xd8, 0x44, 0x10, 0x15, 0x73, 0x45, 0xac, 0xc6, 0xe8, 0x63, 0x00, 0x41, 0x27, 0xbe, 0x2c, 0xbd,
|
||||
0xde, 0x48, 0x87, 0xcd, 0xb3, 0xca, 0x41, 0x02, 0x8d, 0xf6, 0x21, 0xab, 0x9f, 0xa5, 0x99, 0x6b,
|
||||
0x79, 0x1a, 0x59, 0xff, 0x63, 0x1a, 0x20, 0x74, 0xf3, 0x7f, 0xda, 0xb7, 0xd6, 0x9d, 0x6f, 0xbe,
|
||||
0xaf, 0xa6, 0xbe, 0xfb, 0xbe, 0x9a, 0xfa, 0xed, 0x65, 0x35, 0xfd, 0xcd, 0x65, 0x35, 0xfd, 0xed,
|
||||
0x65, 0x35, 0xfd, 0xf7, 0xcb, 0x6a, 0xfa, 0xf7, 0x3f, 0x54, 0x53, 0xdf, 0xfe, 0x50, 0x4d, 0x7d,
|
||||
0xf7, 0x43, 0x35, 0x35, 0xcc, 0xaa, 0x56, 0xf8, 0x47, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x92,
|
||||
0xc4, 0x34, 0xad, 0xa7, 0x17, 0x00, 0x00,
|
||||
0x23, 0xdd, 0x02, 0xc7, 0x8c, 0x56, 0x16, 0x32, 0x13, 0x66, 0xd3, 0xfa, 0x1e, 0x6c, 0x3d, 0x73,
|
||||
0xed, 0xa8, 0x02, 0x79, 0x7d, 0xe0, 0xa1, 0xbf, 0x66, 0x70, 0x3c, 0xaf, 0x5f, 0x83, 0xd2, 0xc2,
|
||||
0x15, 0xd7, 0x2d, 0x28, 0x2d, 0x5c, 0x17, 0x7a, 0x0b, 0x36, 0x27, 0xe4, 0xab, 0x81, 0xc5, 0x3c,
|
||||
0x6b, 0xca, 0x39, 0xf5, 0x84, 0xd6, 0x51, 0x9a, 0x90, 0xaf, 0x0e, 0x62, 0x21, 0x7a, 0x1f, 0xb6,
|
||||
0x04, 0x13, 0xc4, 0x1d, 0x58, 0x6c, 0xe2, 0xbb, 0x34, 0x8c, 0x8e, 0x35, 0x85, 0x2c, 0xab, 0x0f,
|
||||
0x07, 0x73, 0x79, 0xdd, 0x80, 0x42, 0x7c, 0x97, 0xf5, 0x3f, 0x6f, 0x40, 0x3e, 0xf2, 0x74, 0x74,
|
||||
0x0f, 0x80, 0xc4, 0x07, 0xa5, 0x0f, 0xe2, 0xdd, 0x17, 0x3a, 0x55, 0x49, 0x97, 0x1e, 0x3e, 0xa7,
|
||||
0xa3, 0x26, 0x14, 0x2c, 0xe6, 0x09, 0xe2, 0x78, 0x94, 0xeb, 0x48, 0x5d, 0xea, 0x9f, 0x07, 0x11,
|
||||
0x48, 0xeb, 0x98, 0xb3, 0x50, 0x0b, 0x72, 0x23, 0xea, 0x51, 0xee, 0x58, 0xda, 0xc1, 0xdf, 0x5e,
|
||||
0xea, 0x98, 0x21, 0x04, 0x4f, 0x3d, 0xe1, 0x4c, 0xa8, 0xd6, 0x12, 0x11, 0xd1, 0xe7, 0x50, 0xe0,
|
||||
0x34, 0x60, 0x53, 0x6e, 0xd1, 0x40, 0x87, 0xfb, 0xee, 0xf2, 0x30, 0x09, 0x41, 0x98, 0xfe, 0x66,
|
||||
0xea, 0x70, 0x2a, 0xb7, 0x10, 0xe0, 0x39, 0x15, 0x7d, 0x0a, 0x39, 0x4e, 0x03, 0x41, 0xb8, 0x78,
|
||||
0x5e, 0xc4, 0xe2, 0x10, 0xd2, 0x63, 0xae, 0x63, 0xcd, 0x70, 0xc4, 0x40, 0x9f, 0x42, 0xc1, 0x77,
|
||||
0x89, 0xa5, 0xb4, 0x9a, 0x1b, 0xab, 0x63, 0xac, 0x17, 0x81, 0xf0, 0x1c, 0x8f, 0x3e, 0x06, 0x70,
|
||||
0xd9, 0x68, 0x60, 0x73, 0xe7, 0x94, 0x72, 0x1d, 0x65, 0x95, 0x65, 0xec, 0xb6, 0x42, 0xe0, 0x82,
|
||||
0xcb, 0x46, 0xe1, 0x10, 0xdd, 0xf9, 0xaf, 0x82, 0x24, 0x11, 0x20, 0x6f, 0x40, 0xf1, 0x98, 0x71,
|
||||
0x8b, 0x0e, 0x74, 0xac, 0x17, 0x94, 0x6f, 0x19, 0x4a, 0x16, 0x06, 0x28, 0xfa, 0x15, 0xbc, 0x12,
|
||||
0x9d, 0xd6, 0x80, 0xd3, 0x63, 0xca, 0xa9, 0x27, 0x8f, 0xdc, 0x50, 0xcb, 0xbe, 0xf5, 0xfc, 0x23,
|
||||
0xd7, 0x68, 0x9d, 0x6a, 0x11, 0xbf, 0xfc, 0x21, 0x68, 0x15, 0x20, 0xc7, 0xc3, 0x0b, 0xae, 0xff,
|
||||
0x2e, 0x2d, 0xe3, 0xec, 0x12, 0x02, 0xed, 0x81, 0x11, 0x2f, 0xef, 0xd8, 0xca, 0xe1, 0x0a, 0xad,
|
||||
0xcd, 0x8b, 0xf3, 0x1d, 0x88, 0xb0, 0xdd, 0xb6, 0xcc, 0xc0, 0x7a, 0x6c, 0xa3, 0x0e, 0x94, 0x62,
|
||||
0x82, 0x2c, 0x82, 0x74, 0x99, 0x50, 0x7b, 0x9e, 0xa5, 0x47, 0x33, 0x9f, 0xe2, 0x22, 0x4f, 0xcc,
|
||||
0xea, 0xbf, 0x04, 0xf4, 0xac, 0x03, 0x22, 0x04, 0x99, 0x13, 0xc7, 0xd3, 0x66, 0x60, 0x35, 0x46,
|
||||
0x0d, 0xc8, 0xf9, 0x64, 0xe6, 0x32, 0x62, 0x6b, 0x3f, 0xbc, 0xde, 0x08, 0xcb, 0xa3, 0x46, 0x54,
|
||||
0x1e, 0x35, 0x9a, 0xde, 0x0c, 0x47, 0xa0, 0xfa, 0x3d, 0x78, 0x75, 0x69, 0x9c, 0xa1, 0x7d, 0x28,
|
||||
0xc6, 0x31, 0x32, 0xdf, 0xeb, 0xb5, 0x8b, 0xf3, 0x1d, 0x23, 0x0e, 0xa6, 0x6e, 0x1b, 0x1b, 0x31,
|
||||
0xa8, 0x6b, 0xd7, 0xff, 0xba, 0x09, 0xa5, 0x85, 0x48, 0x43, 0xd7, 0x61, 0xc3, 0x99, 0x90, 0x11,
|
||||
0xd5, 0x36, 0x86, 0x13, 0xd4, 0x81, 0xac, 0x4b, 0x86, 0xd4, 0x95, 0xb1, 0x22, 0x2f, 0xee, 0xff,
|
||||
0xae, 0x0c, 0xd9, 0xc6, 0x7d, 0x85, 0xef, 0x78, 0x82, 0xcf, 0xb0, 0x26, 0x23, 0x13, 0x72, 0x16,
|
||||
0x9b, 0x4c, 0x88, 0x27, 0x1f, 0xc9, 0xf5, 0xdd, 0x02, 0x8e, 0xa6, 0xf2, 0x64, 0x08, 0x1f, 0x05,
|
||||
0x66, 0x46, 0x89, 0xd5, 0x58, 0xe6, 0xc8, 0x31, 0x0b, 0x84, 0x47, 0x26, 0xd4, 0xdc, 0x54, 0xd6,
|
||||
0xc4, 0x73, 0x54, 0x86, 0x75, 0xea, 0x9d, 0x9a, 0x1b, 0x0a, 0x2e, 0x87, 0x52, 0x62, 0x3b, 0x61,
|
||||
0x20, 0x14, 0xb0, 0x1c, 0x4a, 0x9d, 0xd3, 0x80, 0x72, 0x33, 0x17, 0x9e, 0xb6, 0x1c, 0xa3, 0xd7,
|
||||
0x20, 0x3b, 0xe2, 0x6c, 0xea, 0x87, 0x1e, 0x58, 0xc0, 0x7a, 0x26, 0xdf, 0x3b, 0x9f, 0x3b, 0xa7,
|
||||
0x8e, 0x4b, 0x47, 0x34, 0x30, 0x5f, 0x53, 0x17, 0x51, 0x5d, 0x1a, 0x8b, 0x31, 0x0a, 0x27, 0x18,
|
||||
0xa8, 0x01, 0x19, 0xc7, 0x73, 0x84, 0xf9, 0xba, 0x8e, 0xc3, 0xcb, 0x57, 0xd8, 0x62, 0xcc, 0x7d,
|
||||
0x4c, 0xdc, 0x29, 0xc5, 0x0a, 0x87, 0xb6, 0x61, 0x5d, 0x88, 0x99, 0x59, 0xaa, 0xa5, 0x77, 0xf3,
|
||||
0xad, 0xdc, 0xc5, 0xf9, 0xce, 0xfa, 0xd1, 0xd1, 0x13, 0x2c, 0x65, 0xe8, 0x16, 0x00, 0xf3, 0xa9,
|
||||
0x37, 0x08, 0x84, 0xed, 0x78, 0x26, 0x92, 0x08, 0x5c, 0x90, 0x92, 0xbe, 0x14, 0xa0, 0x1b, 0x32,
|
||||
0x73, 0x11, 0x7b, 0xc0, 0x3c, 0x77, 0x66, 0xbe, 0xa2, 0xbe, 0xe6, 0xa5, 0xe0, 0xa1, 0xe7, 0xce,
|
||||
0xd0, 0x0e, 0x18, 0x81, 0x60, 0xfe, 0x20, 0x70, 0x46, 0x1e, 0x71, 0xcd, 0xeb, 0x6a, 0xe7, 0x20,
|
||||
0x45, 0x7d, 0x25, 0x41, 0x3f, 0x86, 0xec, 0x84, 0x4d, 0x3d, 0x11, 0x98, 0x79, 0x75, 0x91, 0xdb,
|
||||
0xcb, 0xf6, 0xf8, 0x40, 0x22, 0x74, 0xd4, 0x69, 0x38, 0xea, 0xc0, 0x96, 0xd2, 0x3c, 0xe2, 0xc4,
|
||||
0xa2, 0x03, 0x9f, 0x72, 0x87, 0xd9, 0xfa, 0x7d, 0xde, 0x7e, 0x66, 0xb7, 0x6d, 0xdd, 0x0a, 0xe0,
|
||||
0x6b, 0x92, 0x73, 0x47, 0x52, 0x7a, 0x8a, 0x81, 0x7a, 0x50, 0xf4, 0xa7, 0xae, 0x3b, 0x60, 0x7e,
|
||||
0xf8, 0x1a, 0x85, 0x09, 0xfc, 0x05, 0xdc, 0xa9, 0x37, 0x75, 0xdd, 0x87, 0x21, 0x09, 0x1b, 0xfe,
|
||||
0x7c, 0x82, 0x3e, 0x83, 0x5c, 0x40, 0x2d, 0x4e, 0x45, 0x60, 0x16, 0xd5, 0x96, 0xde, 0x5c, 0xa6,
|
||||
0xac, 0xaf, 0x20, 0x71, 0x5e, 0xc0, 0x11, 0x47, 0xd2, 0x2d, 0x95, 0xd6, 0x02, 0xf3, 0xd5, 0xd5,
|
||||
0x74, 0x9d, 0xf9, 0xe6, 0x74, 0xcd, 0x91, 0xe1, 0x22, 0x7d, 0x32, 0x30, 0xb7, 0x94, 0x3b, 0x85,
|
||||
0x13, 0xf4, 0x04, 0xc0, 0xf6, 0x82, 0x41, 0x08, 0x32, 0xaf, 0xa9, 0x3d, 0xbe, 0x7f, 0xf5, 0x1e,
|
||||
0xdb, 0x87, 0x7d, 0x5d, 0x87, 0x94, 0x2e, 0xce, 0x77, 0x0a, 0xf1, 0x14, 0x17, 0x6c, 0x2f, 0x08,
|
||||
0x87, 0xa8, 0x05, 0xc6, 0x98, 0x12, 0x57, 0x8c, 0xad, 0x31, 0xb5, 0x4e, 0xcc, 0xf2, 0xea, 0xb2,
|
||||
0xe4, 0xae, 0x82, 0x69, 0x0d, 0x49, 0x12, 0xea, 0x42, 0xc1, 0x09, 0x98, 0xab, 0xae, 0xc8, 0x34,
|
||||
0x55, 0x7e, 0x7b, 0x01, 0xeb, 0xba, 0x11, 0x05, 0xcf, 0xd9, 0xe8, 0x26, 0x14, 0x7c, 0xc7, 0x0e,
|
||||
0xee, 0x3b, 0x13, 0x47, 0x98, 0xdb, 0xb5, 0xf4, 0xee, 0x3a, 0x9e, 0x0b, 0xd0, 0x5d, 0xc8, 0x05,
|
||||
0xb3, 0xc0, 0x12, 0x6e, 0x60, 0x56, 0xd4, 0xe1, 0x36, 0xae, 0x5e, 0xa6, 0x1f, 0x12, 0xc2, 0xc4,
|
||||
0x11, 0xd1, 0x65, 0xc5, 0x63, 0x11, 0x5f, 0xf7, 0x02, 0x03, 0x62, 0xdb, 0xe6, 0x0d, 0x75, 0xe0,
|
||||
0xa5, 0xb9, 0xb4, 0x69, 0xdb, 0xe8, 0x1d, 0xb8, 0x96, 0x80, 0xd9, 0x9c, 0xf9, 0xe6, 0x4d, 0x85,
|
||||
0x4b, 0xb0, 0xdb, 0x9c, 0xf9, 0xb2, 0x86, 0x98, 0xba, 0xd2, 0xc6, 0xc0, 0xbc, 0xa5, 0x2c, 0xdb,
|
||||
0xbd, 0xda, 0xb2, 0x47, 0x8a, 0x80, 0x23, 0x62, 0xe5, 0x63, 0x30, 0x12, 0x49, 0x4e, 0x26, 0xa0,
|
||||
0x13, 0x3a, 0xd3, 0x79, 0x53, 0x0e, 0xa5, 0x73, 0x9c, 0xca, 0x98, 0x57, 0x89, 0xbd, 0x80, 0xc3,
|
||||
0xc9, 0x27, 0x6b, 0x1f, 0xa5, 0x2b, 0xfb, 0x60, 0x24, 0x1c, 0x1a, 0xbd, 0x29, 0x1f, 0x9d, 0x91,
|
||||
0x13, 0x08, 0x3e, 0x1b, 0x90, 0xa9, 0x18, 0x9b, 0x3f, 0x57, 0x84, 0x62, 0x24, 0x6c, 0x4e, 0xc5,
|
||||
0xb8, 0x32, 0x80, 0xb9, 0x47, 0xa0, 0x1a, 0x18, 0x32, 0x0f, 0x06, 0x94, 0x9f, 0x52, 0x2e, 0x4b,
|
||||
0x48, 0xb9, 0xc9, 0xa4, 0x48, 0x66, 0xba, 0x80, 0x12, 0x6e, 0x8d, 0x55, 0xca, 0x2e, 0x60, 0x3d,
|
||||
0x93, 0x39, 0x38, 0x0a, 0x3e, 0x9d, 0x83, 0xf5, 0xb4, 0xf2, 0x09, 0x14, 0x93, 0x87, 0xff, 0x1f,
|
||||
0x6d, 0xa8, 0x0d, 0xd9, 0xf0, 0x78, 0x64, 0xd6, 0x55, 0x19, 0x5b, 0xbf, 0x71, 0x2a, 0x5b, 0x23,
|
||||
0xc8, 0x04, 0xec, 0x58, 0x28, 0xda, 0x3a, 0x56, 0x63, 0x29, 0x1b, 0x13, 0x1e, 0x76, 0x4b, 0xeb,
|
||||
0x58, 0x8d, 0xeb, 0x7f, 0x49, 0x43, 0x21, 0x76, 0x33, 0xf4, 0x01, 0x6c, 0x75, 0xfb, 0x0f, 0xef,
|
||||
0x37, 0x8f, 0xba, 0x0f, 0x0f, 0x07, 0xed, 0xce, 0xe7, 0xcd, 0x47, 0xf7, 0x8f, 0xca, 0xa9, 0xca,
|
||||
0xad, 0xa7, 0x67, 0xb5, 0xed, 0xf9, 0x8b, 0x16, 0xc1, 0xdb, 0xf4, 0x98, 0x4c, 0x5d, 0xb1, 0xc8,
|
||||
0xea, 0xe1, 0x87, 0x07, 0x9d, 0x7e, 0xbf, 0x9c, 0x5e, 0xc5, 0xea, 0x71, 0x66, 0xd1, 0x20, 0x40,
|
||||
0xfb, 0x50, 0x9e, 0xb3, 0xee, 0x3e, 0xe9, 0x75, 0xf0, 0xe3, 0xf2, 0x5a, 0xe5, 0xe6, 0xd3, 0xb3,
|
||||
0x9a, 0xf9, 0x2c, 0xe9, 0xee, 0xcc, 0xa7, 0xfc, 0xb1, 0x6e, 0x46, 0xff, 0x91, 0x86, 0x62, 0xb2,
|
||||
0x03, 0x40, 0x07, 0x61, 0xdd, 0xaf, 0x0e, 0x60, 0x73, 0x7f, 0xef, 0xaa, 0x8e, 0x41, 0x55, 0x11,
|
||||
0xee, 0x54, 0xea, 0x7d, 0xc0, 0x6c, 0x8a, 0x15, 0x19, 0x7d, 0x00, 0x1b, 0x3e, 0xe3, 0x22, 0x7a,
|
||||
0x6f, 0x97, 0x3f, 0x45, 0x8c, 0x47, 0x25, 0x59, 0x08, 0xae, 0x8f, 0x61, 0x73, 0x51, 0x1b, 0xba,
|
||||
0x0d, 0xeb, 0x8f, 0xbb, 0xbd, 0x72, 0xaa, 0x72, 0xe3, 0xe9, 0x59, 0xed, 0xf5, 0xc5, 0x8f, 0x8f,
|
||||
0x1d, 0x2e, 0xa6, 0xc4, 0xed, 0xf6, 0xd0, 0x7b, 0xb0, 0xd1, 0x3e, 0xec, 0x63, 0x5c, 0x4e, 0x57,
|
||||
0x76, 0x9e, 0x9e, 0xd5, 0x6e, 0x2c, 0xe2, 0xe4, 0x27, 0x36, 0xf5, 0x6c, 0xcc, 0x86, 0x71, 0x0b,
|
||||
0xfe, 0xcf, 0x35, 0x30, 0x74, 0x19, 0xf2, 0xb2, 0xff, 0xd2, 0x94, 0xc2, 0x92, 0x36, 0xca, 0x9e,
|
||||
0x6b, 0x57, 0x56, 0xb6, 0xc5, 0x90, 0xa0, 0x23, 0xe3, 0x0d, 0x28, 0x3a, 0xfe, 0xe9, 0x87, 0x03,
|
||||
0xea, 0x91, 0xa1, 0xab, 0xbb, 0xf1, 0x3c, 0x36, 0xa4, 0xac, 0x13, 0x8a, 0x64, 0x61, 0xe1, 0x78,
|
||||
0x82, 0x72, 0x4f, 0xf7, 0xd9, 0x79, 0x1c, 0xcf, 0xd1, 0x67, 0x90, 0x71, 0x7c, 0x32, 0xd1, 0xe5,
|
||||
0xf8, 0xd2, 0x1d, 0x74, 0x7b, 0xcd, 0x07, 0x3a, 0x72, 0x5b, 0xf9, 0x8b, 0xf3, 0x9d, 0x8c, 0x14,
|
||||
0x60, 0x45, 0x43, 0xd5, 0xa8, 0x57, 0x92, 0x2b, 0xa9, 0x62, 0x24, 0x8f, 0x13, 0x12, 0x19, 0x7d,
|
||||
0x8e, 0x37, 0xe2, 0x34, 0x08, 0x54, 0x59, 0x92, 0xc7, 0xd1, 0x14, 0x55, 0x20, 0xa7, 0xeb, 0x6a,
|
||||
0xd5, 0x62, 0x15, 0x64, 0xb7, 0xa2, 0x05, 0xad, 0x12, 0x18, 0xe1, 0x69, 0x0c, 0x8e, 0x39, 0x9b,
|
||||
0xd4, 0xff, 0x95, 0x01, 0xe3, 0xc0, 0x9d, 0x06, 0x42, 0xd7, 0x6c, 0x2f, 0xed, 0xf0, 0x9f, 0xc0,
|
||||
0x16, 0x51, 0x7f, 0x7d, 0x88, 0x27, 0x1f, 0x79, 0xd5, 0xae, 0xe8, 0x0b, 0xb8, 0xbd, 0x54, 0x5d,
|
||||
0x0c, 0x0e, 0x5b, 0x9b, 0x56, 0x56, 0xea, 0x34, 0xd3, 0xb8, 0x4c, 0x2e, 0x7d, 0x41, 0x7d, 0x28,
|
||||
0x31, 0x6e, 0x8d, 0x69, 0x20, 0xc2, 0xd2, 0x40, 0xff, 0x25, 0x59, 0xfa, 0xff, 0xec, 0x61, 0x12,
|
||||
0xa8, 0x5f, 0xc4, 0xd0, 0xda, 0x45, 0x1d, 0xe8, 0x23, 0xc8, 0x70, 0x72, 0x1c, 0xb5, 0x5e, 0x4b,
|
||||
0x83, 0x04, 0x93, 0x63, 0xb1, 0xa0, 0x42, 0x31, 0xd0, 0x17, 0x00, 0xb6, 0x13, 0xf8, 0x44, 0x58,
|
||||
0x63, 0xca, 0xf5, 0x65, 0x2f, 0xdd, 0x62, 0x3b, 0x46, 0x2d, 0x68, 0x49, 0xb0, 0xd1, 0x3d, 0x28,
|
||||
0x58, 0x24, 0x72, 0xd7, 0xec, 0xea, 0x5f, 0x47, 0x07, 0x4d, 0xad, 0xa2, 0x2c, 0x55, 0x5c, 0x9c,
|
||||
0xef, 0xe4, 0x23, 0x09, 0xce, 0x5b, 0x44, 0xbb, 0xef, 0x3d, 0x28, 0x09, 0x12, 0x9c, 0x0c, 0xec,
|
||||
0x30, 0x9d, 0x85, 0x6e, 0xb2, 0xe2, 0x85, 0x97, 0x1d, 0xba, 0x4e, 0x7b, 0xd1, 0x75, 0x16, 0x45,
|
||||
0x42, 0x86, 0x7e, 0x01, 0x5b, 0xd4, 0xb3, 0xf8, 0x4c, 0x39, 0x6b, 0x64, 0x61, 0x7e, 0xf5, 0x66,
|
||||
0x3b, 0x31, 0x78, 0x61, 0xb3, 0x65, 0x7a, 0x49, 0x5e, 0xff, 0x5b, 0x1a, 0x20, 0x2c, 0xa9, 0x5e,
|
||||
0xae, 0x03, 0x22, 0xc8, 0xd8, 0x44, 0x10, 0xe5, 0x73, 0x45, 0xac, 0xc6, 0xe8, 0x13, 0x00, 0x41,
|
||||
0x27, 0xbe, 0x4c, 0xbd, 0xde, 0x48, 0xbb, 0xcd, 0xf3, 0xd2, 0x41, 0x02, 0x8d, 0xf6, 0x21, 0xab,
|
||||
0x1b, 0xe4, 0xcc, 0x95, 0x3c, 0x8d, 0xac, 0xff, 0x31, 0x0d, 0x10, 0x6e, 0xf3, 0x7f, 0x7a, 0x6f,
|
||||
0xad, 0xdb, 0xdf, 0x7c, 0x5f, 0x4d, 0x7d, 0xf7, 0x7d, 0x35, 0xf5, 0xdb, 0x8b, 0x6a, 0xfa, 0x9b,
|
||||
0x8b, 0x6a, 0xfa, 0xdb, 0x8b, 0x6a, 0xfa, 0xef, 0x17, 0xd5, 0xf4, 0xef, 0x7f, 0xa8, 0xa6, 0xbe,
|
||||
0xfd, 0xa1, 0x9a, 0xfa, 0xee, 0x87, 0x6a, 0x6a, 0x98, 0x55, 0x45, 0xf9, 0x8f, 0xfe, 0x1d, 0x00,
|
||||
0x00, 0xff, 0xff, 0x74, 0x9e, 0x83, 0x44, 0x31, 0x18, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *NodeSpec) Copy() *NodeSpec {
|
||||
|
@ -2010,6 +2055,14 @@ func (m *ContainerSpec) CopyFrom(src interface{}) {
|
|||
copy(m.CapabilityDrop, o.CapabilityDrop)
|
||||
}
|
||||
|
||||
if o.Ulimits != nil {
|
||||
m.Ulimits = make([]*ContainerSpec_Ulimit, len(o.Ulimits))
|
||||
for i := range m.Ulimits {
|
||||
m.Ulimits[i] = &ContainerSpec_Ulimit{}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(m.Ulimits[i], o.Ulimits[i])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions {
|
||||
|
@ -2057,6 +2110,21 @@ func (m *ContainerSpec_DNSConfig) CopyFrom(src interface{}) {
|
|||
|
||||
}
|
||||
|
||||
func (m *ContainerSpec_Ulimit) Copy() *ContainerSpec_Ulimit {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
o := &ContainerSpec_Ulimit{}
|
||||
o.CopyFrom(m)
|
||||
return o
|
||||
}
|
||||
|
||||
func (m *ContainerSpec_Ulimit) CopyFrom(src interface{}) {
|
||||
|
||||
o := src.(*ContainerSpec_Ulimit)
|
||||
*m = *o
|
||||
}
|
||||
|
||||
func (m *EndpointSpec) Copy() *EndpointSpec {
|
||||
if m == nil {
|
||||
return nil
|
||||
|
@ -3025,6 +3093,20 @@ func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if len(m.Ulimits) > 0 {
|
||||
for _, msg := range m.Ulimits {
|
||||
dAtA[i] = 0xea
|
||||
i++
|
||||
dAtA[i] = 0x1
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -3117,6 +3199,40 @@ func (m *ContainerSpec_DNSConfig) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *ContainerSpec_Ulimit) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ContainerSpec_Ulimit) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if m.Soft != 0 {
|
||||
dAtA[i] = 0x10
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Soft))
|
||||
}
|
||||
if m.Hard != 0 {
|
||||
dAtA[i] = 0x18
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Hard))
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *EndpointSpec) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
|
@ -3851,6 +3967,12 @@ func (m *ContainerSpec) Size() (n int) {
|
|||
n += 2 + l + sovSpecs(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.Ulimits) > 0 {
|
||||
for _, e := range m.Ulimits {
|
||||
l = e.Size()
|
||||
n += 2 + l + sovSpecs(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -3894,6 +4016,25 @@ func (m *ContainerSpec_DNSConfig) Size() (n int) {
|
|||
return n
|
||||
}
|
||||
|
||||
func (m *ContainerSpec_Ulimit) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Name)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovSpecs(uint64(l))
|
||||
}
|
||||
if m.Soft != 0 {
|
||||
n += 1 + sovSpecs(uint64(m.Soft))
|
||||
}
|
||||
if m.Hard != 0 {
|
||||
n += 1 + sovSpecs(uint64(m.Hard))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *EndpointSpec) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
|
@ -4276,6 +4417,7 @@ func (this *ContainerSpec) String() string {
|
|||
`Sysctls:` + mapStringForSysctls + `,`,
|
||||
`CapabilityAdd:` + fmt.Sprintf("%v", this.CapabilityAdd) + `,`,
|
||||
`CapabilityDrop:` + fmt.Sprintf("%v", this.CapabilityDrop) + `,`,
|
||||
`Ulimits:` + strings.Replace(fmt.Sprintf("%v", this.Ulimits), "ContainerSpec_Ulimit", "ContainerSpec_Ulimit", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -4302,6 +4444,18 @@ func (this *ContainerSpec_DNSConfig) String() string {
|
|||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *ContainerSpec_Ulimit) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&ContainerSpec_Ulimit{`,
|
||||
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||
`Soft:` + fmt.Sprintf("%v", this.Soft) + `,`,
|
||||
`Hard:` + fmt.Sprintf("%v", this.Hard) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *EndpointSpec) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
|
@ -6985,6 +7139,40 @@ func (m *ContainerSpec) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.CapabilityDrop = append(m.CapabilityDrop, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
case 29:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Ulimits", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Ulimits = append(m.Ulimits, &ContainerSpec_Ulimit{})
|
||||
if err := m.Ulimits[len(m.Ulimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipSpecs(dAtA[iNdEx:])
|
||||
|
@ -7243,6 +7431,129 @@ func (m *ContainerSpec_DNSConfig) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ContainerSpec_Ulimit) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Ulimit: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Ulimit: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Soft", wireType)
|
||||
}
|
||||
m.Soft = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Soft |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType)
|
||||
}
|
||||
m.Hard = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Hard |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipSpecs(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *EndpointSpec) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
|
|
@@ -358,8 +358,18 @@ message ContainerSpec {

	// CapabilityAdd sets the list of capabilities to add to the default capability list
	repeated string capability_add = 27;
	// CapabilityAdd sets the list of capabilities to drop from the default capability list
	// CapabilityDrop sets the list of capabilities to drop from the default capability list
	repeated string capability_drop = 28;

	message Ulimit {
		string name = 1;
		int64 soft = 2;
		int64 hard = 3;
	}

	// Ulimits defines the list of ulimits to set in the container. This option
	// is equivalent to passing --ulimit to docker run.
	repeated Ulimit ulimits = 29;
}

// EndpointSpec defines the properties that can be configured to
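For reference, the new `Ulimit` message corresponds to the generated `ContainerSpec_Ulimit` Go type shown earlier in this diff. A minimal sketch of populating it, assuming the usual `github.com/docker/swarmkit/api` import path for the generated package:

```go
package main

import (
    "fmt"

    "github.com/docker/swarmkit/api"
)

func main() {
    // Roughly equivalent to `--ulimit nofile=1024:2048` on the CLI:
    // soft limit 1024, hard limit 2048 for open files.
    spec := api.ContainerSpec{
        Ulimits: []*api.ContainerSpec_Ulimit{
            {Name: "nofile", Soft: 1024, Hard: 2048},
        },
    }
    fmt.Println(spec.Ulimits[0].Name, spec.Ulimits[0].Soft, spec.Ulimits[0].Hard)
}
```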
|
|
@@ -1,3 +1,3 @@
module github.com/golang/protobuf

go 1.12
go 1.9
|
|
|
@ -102,7 +102,8 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|||
//
|
||||
type Any struct {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// protocol buffer message. This string must contain at least
|
||||
// one "/" character. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
|
@ -181,7 +182,9 @@ func init() {
|
|||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
|
||||
}
|
||||
|
||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
||||
// 185 bytes of a gzipped FileDescriptorProto
|
||||
|
|
|
@ -121,7 +121,8 @@ option objc_class_prefix = "GPB";
|
|||
//
|
||||
message Any {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// protocol buffer message. This string must contain at least
|
||||
// one "/" character. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
|
|
|
@ -41,7 +41,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
|
@ -142,7 +142,9 @@ func init() {
|
|||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5)
|
||||
}
|
||||
|
||||
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
||||
// 190 bytes of a gzipped FileDescriptorProto
|
||||
|
|
|
@ -61,7 +61,7 @@ option objc_class_prefix = "GPB";
|
|||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
|
@ -101,7 +101,6 @@ option objc_class_prefix = "GPB";
|
|||
//
|
||||
//
|
||||
message Duration {
|
||||
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
|
|
|
@ -20,17 +20,19 @@ var _ = math.Inf
|
|||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
|
@ -91,12 +93,14 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
|
@ -160,7 +164,9 @@ func init() {
|
|||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e)
|
||||
}
|
||||
|
||||
var fileDescriptor_292007bbfe81227e = []byte{
|
||||
// 191 bytes of a gzipped FileDescriptorProto
|
||||
|
|
|
@ -40,17 +40,19 @@ option java_outer_classname = "TimestampProto";
|
|||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
|
@ -111,17 +113,18 @@ option objc_class_prefix = "GPB";
|
|||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
|
|
|
@@ -435,8 +435,7 @@ func Vars(r *http.Request) map[string]string {
// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
// after the handler returns, unless the KeepContext option is set on the
// Router.
// after the handler returns.
func CurrentRoute(r *http.Request) *Route {
	if rv := r.Context().Value(routeKey); rv != nil {
		return rv.(*Route)
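The doc change above reflects that gorilla/mux no longer has a KeepContext escape hatch: CurrentRoute is only usable inside the matched handler. A small usage sketch (the route path and name are illustrative, not taken from this diff):

```go
package main

import (
    "fmt"
    "net/http"

    "github.com/gorilla/mux"
)

func main() {
    r := mux.NewRouter()
    r.HandleFunc("/books/{title}", func(w http.ResponseWriter, req *http.Request) {
        // CurrentRoute is only valid here, while the matched handler runs.
        route := mux.CurrentRoute(req)
        tpl, _ := route.GetPathTemplate()
        fmt.Fprintf(w, "matched %q, title=%s\n", tpl, mux.Vars(req)["title"])
    }).Name("book")
    http.ListenAndServe(":8080", r)
}
```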
|
|
@ -325,6 +325,12 @@ func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
|
|||
// Store host variables.
|
||||
if v.host != nil {
|
||||
host := getHost(req)
|
||||
if v.host.wildcardHostPort {
|
||||
// Don't be strict on the port match
|
||||
if i := strings.Index(host, ":"); i != -1 {
|
||||
host = host[:i]
|
||||
}
|
||||
}
|
||||
matches := v.host.regexp.FindStringSubmatchIndex(host)
|
||||
if len(matches) > 0 {
|
||||
extractVars(host, matches, v.host.varsN, m.Vars)
|
||||
|
|
|
@@ -3,6 +3,7 @@
[![Build Status](https://travis-ci.org/opencontainers/runc.svg?branch=master)](https://travis-ci.org/opencontainers/runc)
[![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runc)](https://goreportcard.com/report/github.com/opencontainers/runc)
[![GoDoc](https://godoc.org/github.com/opencontainers/runc?status.svg)](https://godoc.org/github.com/opencontainers/runc)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/588/badge)](https://bestpractices.coreinfrastructure.org/projects/588)

## Introduction

@@ -18,22 +19,23 @@ You can find official releases of `runc` on the [release](https://github.com/ope

Currently, the following features are not considered to be production-ready:

* Support for cgroup v2
* [Support for cgroup v2](./docs/cgroup-v2.md)

## Security

The reporting process and disclosure communications are outlined in [/org/security](https://github.com/opencontainers/org/blob/master/security/).
The reporting process and disclosure communications are outlined [here](https://github.com/opencontainers/org/blob/master/SECURITY.md).

### Security Audit
A third party security audit was performed by Cure53, you can see the full report [here](https://github.com/opencontainers/runc/blob/master/docs/Security-Audit.pdf).

## Building

`runc` currently supports the Linux platform with various architecture support.
It must be built with Go version 1.6 or higher in order for some features to function properly.
It must be built with Go version 1.13 or higher.

In order to enable seccomp support you will need to install `libseccomp` on your platform.
> e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu

Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.

```bash
# create a 'github.com/opencontainers' in your GOPATH/src
cd github.com/opencontainers

@@ -58,20 +60,22 @@ sudo make install

#### Build Tags

`runc` supports optional build tags for compiling support of various features.
To add build tags to the make option the `BUILDTAGS` variable must be set.
`runc` supports optional build tags for compiling support of various features,
with some of them enabled by default (see `BUILDTAGS` in top-level `Makefile`).

To change build tags from the default, set the `BUILDTAGS` variable for make,
e.g.

```bash
make BUILDTAGS='seccomp apparmor'
```

| Build Tag | Feature                            | Dependency  |
|-----------|------------------------------------|-------------|
| seccomp   | Syscall filtering                  | libseccomp  |
| selinux   | selinux process and mount labeling | <none>      |
| apparmor  | apparmor profile support           | <none>      |
| ambient   | ambient capability support         | kernel 4.3  |
| nokmem    | disable kernel memory account      | <none>      |
| Build Tag | Feature                            | Enabled by default | Dependency |
|-----------|------------------------------------|--------------------|------------|
| seccomp   | Syscall filtering                  | yes                | libseccomp |
| selinux   | selinux process and mount labeling | yes                | <none>     |
| apparmor  | apparmor profile support           | yes                | <none>     |
| nokmem    | disable kernel memory accounting   | no                 | <none>     |

### Running the test suite

@@ -97,17 +101,30 @@ You can run a specific integration test by setting the `TESTPATH` variable.
# make test TESTPATH="/checkpoint.bats"
```

You can run a test in your proxy environment by setting `DOCKER_BUILD_PROXY` and `DOCKER_RUN_PROXY` variables.
You can run a specific rootless integration test by setting the `ROOTLESS_TESTPATH` variable.

```bash
# make test DOCKER_BUILD_PROXY="--build-arg HTTP_PROXY=http://yourproxy/" DOCKER_RUN_PROXY="-e HTTP_PROXY=http://yourproxy/"
# make test ROOTLESS_TESTPATH="/checkpoint.bats"
```

You can run a test using your container engine's flags by setting `CONTAINER_ENGINE_BUILD_FLAGS` and `CONTAINER_ENGINE_RUN_FLAGS` variables.

```bash
# make test CONTAINER_ENGINE_BUILD_FLAGS="--build-arg http_proxy=http://yourproxy/" CONTAINER_ENGINE_RUN_FLAGS="-e http_proxy=http://yourproxy/"
```

### Dependencies Management

`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependencies management.
Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update
new dependencies.
`runc` uses [Go Modules](https://github.com/golang/go/wiki/Modules) for dependencies management.
Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for how to add or update
new dependencies. When updating dependencies, be sure that you are running Go `1.14` or newer.

```
# Update vendored dependencies
make vendor
# Verify all dependencies
make verify-dependencies
```

## Using runc

@@ -275,6 +292,9 @@ PIDFile=/run/mycontainerid.pid
WantedBy=multi-user.target
```

#### cgroup v2
See [`./docs/cgroup-v2.md`](./docs/cgroup-v2.md).

## License

The code and docs are released under the [Apache 2.0 license](LICENSE).
@@ -0,0 +1,26 @@
module github.com/opencontainers/runc

go 1.14

require (
	github.com/checkpoint-restore/go-criu/v4 v4.1.0
	github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775
	github.com/containerd/console v1.0.0
	github.com/coreos/go-systemd/v22 v22.1.0
	github.com/cyphar/filepath-securejoin v0.2.2
	github.com/docker/go-units v0.4.0
	github.com/godbus/dbus/v5 v5.0.3
	github.com/golang/protobuf v1.4.2
	github.com/moby/sys/mountinfo v0.1.3
	github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976
	github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
	github.com/opencontainers/selinux v1.6.0
	github.com/pkg/errors v0.9.1
	github.com/seccomp/libseccomp-golang v0.9.1
	github.com/sirupsen/logrus v1.6.0
	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
	// NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
	github.com/urfave/cli v1.22.1
	github.com/vishvananda/netlink v1.1.0
	golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
)
@ -155,8 +155,7 @@ config := &configs.Config{
|
|||
Parent: "system",
|
||||
Resources: &configs.Resources{
|
||||
MemorySwappiness: nil,
|
||||
AllowAllDevices: nil,
|
||||
AllowedDevices: configs.DefaultAllowedDevices,
|
||||
Devices: specconv.AllowedDevices,
|
||||
},
|
||||
},
|
||||
MaskPaths: []string{
|
||||
|
@ -166,7 +165,7 @@ config := &configs.Config{
|
|||
ReadonlyPaths: []string{
|
||||
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
|
||||
},
|
||||
Devices: configs.DefaultAutoCreatedDevices,
|
||||
Devices: specconv.AllowedDevices,
|
||||
Hostname: "testing",
|
||||
Mounts: []*configs.Mount{
|
||||
{
|
||||
|
|
|
@ -1,7 +1,14 @@
|
|||
// SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
|
||||
/*
|
||||
* Copyright (C) 2019 Aleksa Sarai <cyphar@cyphar.com>
|
||||
* Copyright (C) 2019 SUSE LLC
|
||||
*
|
||||
* This work is dual licensed under the following licenses. You may use,
|
||||
* redistribute, and/or modify the work under the conditions of either (or
|
||||
* both) licenses.
|
||||
*
|
||||
* === Apache-2.0 ===
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
|
@ -13,6 +20,23 @@
|
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
* === LGPL-2.1-or-later ===
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library. If not, see
|
||||
* <https://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
@ -95,8 +119,10 @@ static int is_self_cloned(void)
|
|||
struct statfs fsbuf = {};
|
||||
|
||||
fd = open("/proc/self/exe", O_RDONLY|O_CLOEXEC);
|
||||
if (fd < 0)
|
||||
if (fd < 0) {
|
||||
fprintf(stderr, "you have no read access to runc binary file\n");
|
||||
return -ENOTRECOVERABLE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Is the binary a fully-sealed memfd? We don't need CLONED_BINARY_ENV for
|
||||
|
|
|
@ -714,12 +714,12 @@ void nsexec(void)
|
|||
* ready, so we can receive all possible error codes
|
||||
* generated by children.
|
||||
*/
|
||||
while (!ready) {
|
||||
enum sync_t s;
|
||||
|
||||
syncfd = sync_child_pipe[1];
|
||||
close(sync_child_pipe[0]);
|
||||
|
||||
while (!ready) {
|
||||
enum sync_t s;
|
||||
|
||||
if (read(syncfd, &s, sizeof(s)) != sizeof(s))
|
||||
bail("failed to sync with child: next state");
|
||||
|
||||
|
@ -789,13 +789,13 @@ void nsexec(void)
|
|||
|
||||
/* Now sync with grandchild. */
|
||||
|
||||
syncfd = sync_grandchild_pipe[1];
|
||||
close(sync_grandchild_pipe[0]);
|
||||
|
||||
ready = false;
|
||||
while (!ready) {
|
||||
enum sync_t s;
|
||||
|
||||
syncfd = sync_grandchild_pipe[1];
|
||||
close(sync_grandchild_pipe[0]);
|
||||
|
||||
s = SYNC_GRANDCHILD;
|
||||
if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
|
||||
kill(child, SIGKILL);
|
||||
|
|
|
@ -60,7 +60,7 @@ type Group struct {
|
|||
|
||||
// groupFromOS converts an os/user.(*Group) to local Group
|
||||
//
|
||||
// (This does not include Pass, Shell or Gecos)
|
||||
// (This does not include Pass or List)
|
||||
func groupFromOS(g *user.Group) (Group, error) {
|
||||
newGroup := Group{
|
||||
Name: g.Name,
|
||||
|
@@ -162,10 +162,6 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
	)

	for s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}

		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
@@ -183,6 +179,9 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
			out = append(out, p)
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}

	return out, nil
}
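This change (repeated below for the group, subid, and idmap parsers) moves the bufio.Scanner error check out of the loop body: Scan already returns false on the first error, so checking s.Err() once after the loop is sufficient. A standalone sketch of the pattern:

```go
package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    s := bufio.NewScanner(strings.NewReader("root:x:0:0::/root:/bin/bash\n"))
    var lines []string
    for s.Scan() {
        lines = append(lines, s.Text())
    }
    // Scan returns false on the first error, so a single check here is enough.
    if err := s.Err(); err != nil {
        fmt.Println("scan failed:", err)
        return
    }
    fmt.Println(lines)
}
```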
|
@ -221,10 +220,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
|
|||
)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
text := s.Text()
|
||||
if text == "" {
|
||||
continue
|
||||
|
@ -242,6 +237,9 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
|
|||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
@ -532,10 +530,6 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
|
|||
)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
line := strings.TrimSpace(s.Text())
|
||||
if line == "" {
|
||||
continue
|
||||
|
@ -549,6 +543,9 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
|
|||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
@ -586,10 +583,6 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
|
|||
)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
line := strings.TrimSpace(s.Text())
|
||||
if line == "" {
|
||||
continue
|
||||
|
@ -603,6 +596,9 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
|
|||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
# OCI runtime-spec. When updating this, make sure you use a version tag rather
|
||||
# than a commit ID so it's much more obvious what version of the spec we are
|
||||
# using.
|
||||
github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
|
||||
|
||||
# Core libcontainer functionality.
|
||||
github.com/checkpoint-restore/go-criu 17b0214f6c48980c45dc47ecb0cfd6d9e02df723 # v3.11
|
||||
github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
|
||||
github.com/opencontainers/selinux 5215b1806f52b1fcc2070a8826c542c9d33cd3cf # v1.3.0 (+ CVE-2019-16884)
|
||||
github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
|
||||
github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1
|
||||
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
|
||||
github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270
|
||||
|
||||
# systemd integration.
|
||||
github.com/coreos/go-systemd 95778dfbb74eb7e4dbaf43bf7d71809650ef8076 # v19
|
||||
github.com/godbus/dbus 2ff6f7ffd60f0f2410b3105864bdd12c7894f844 # v5.0.1
|
||||
github.com/golang/protobuf 925541529c1fa6821df4e44ce2723319eb2be768 # v1.0.0
|
||||
|
||||
# Command-line interface.
|
||||
github.com/cyphar/filepath-securejoin a261ee33d7a517f054effbf451841abaafe3e0fd # v0.2.2
|
||||
github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
|
||||
github.com/urfave/cli cfb38830724cc34fedffe9a2a29fb54fa9169cd1 # v1.20.0
|
||||
golang.org/x/sys 9eafafc0a87e0fd0aeeba439a4573537970c44c7 https://github.com/golang/sys
|
||||
|
||||
# console dependencies
|
||||
github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
|
||||
github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1
|
||||
|
||||
# ebpf dependencies
|
||||
github.com/cilium/ebpf 95b36a581eed7b0f127306ed1d16cc0ddc06cf67
|
|
@@ -1,12 +1,34 @@
# Prometheus Go client library

[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)
[![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang)

This is the [Go](http://golang.org) client library for
[Prometheus](http://prometheus.io). It has two separate parts, one for
instrumenting application code, and one for creating clients that talk to the
Prometheus HTTP API.

__This library requires Go1.9 or later.__ The minimum required patch releases for older Go versions are Go1.9.7 and Go1.10.3.

## Important note about releases and stability

This repository generally follows [Semantic
Versioning](https://semver.org/). However, the API client in
prometheus/client_golang/api/… is still considered experimental. Breaking
changes of the API client will _not_ trigger a new major release. The same is
true for selected other new features explicitly marked as **EXPERIMENTAL** in
CHANGELOG.md.

Features that require breaking changes in the stable parts of the repository
are being batched up and tracked in the [v2
milestone](https://github.com/prometheus/client_golang/milestone/2). The v2
development happens in a [separate
branch](https://github.com/prometheus/client_golang/tree/dev-v2) for the time
being. v2 releases off that branch will happen once sufficient stability is
reached. In view of the widespread use of this repository, v1 and v2 will
coexist for a while to enable a convenient transition.

## Instrumenting applications

[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)

@@ -14,8 +36,8 @@ Prometheus HTTP API.
The
[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
contains the instrumentation library. See the
[best practices section](http://prometheus.io/docs/practices/naming/) of the
Prometheus documentation to learn more about instrumenting applications.
[guide](https://prometheus.io/docs/guides/go-application/) on the Prometheus
website to learn more about instrumenting applications.

The
[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
@@ -23,13 +45,14 @@ contains simple examples of instrumented code.

## Client for the Prometheus HTTP API

[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)
[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api)

The
[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
contains the client for the
[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
to write Go applications that query time series data from a Prometheus server.
to write Go applications that query time series data from a Prometheus
server. It is still in alpha stage.

## Where is `model`, `extraction`, and `text`?

|
|
|
@@ -0,0 +1,18 @@
module github.com/prometheus/client_golang

require (
	github.com/beorn7/perks v1.0.1
	github.com/cespare/xxhash/v2 v2.1.1
	github.com/golang/protobuf v1.4.0
	github.com/json-iterator/go v1.1.9
	github.com/kr/pretty v0.1.0 // indirect
	github.com/prometheus/client_model v0.2.0
	github.com/prometheus/common v0.9.1
	github.com/prometheus/procfs v0.0.11
	github.com/stretchr/testify v1.4.0 // indirect
	golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
	gopkg.in/yaml.v2 v2.2.5 // indirect
)

go 1.11
29 vendor/github.com/prometheus/client_golang/prometheus/build_info.go generated vendored Normal file
|
@ -0,0 +1,29 @@
|
|||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.12
|
||||
|
||||
package prometheus
|
||||
|
||||
import "runtime/debug"
|
||||
|
||||
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
|
||||
func readBuildInfo() (path, version, sum string) {
|
||||
path, version, sum = "unknown", "unknown", "unknown"
|
||||
if bi, ok := debug.ReadBuildInfo(); ok {
|
||||
path = bi.Main.Path
|
||||
version = bi.Main.Version
|
||||
sum = bi.Main.Sum
|
||||
}
|
||||
return
|
||||
}
|
22 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go generated vendored Normal file
|
@ -0,0 +1,22 @@
|
|||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.12
|
||||
|
||||
package prometheus
|
||||
|
||||
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
|
||||
// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
|
||||
func readBuildInfo() (path, version, sum string) {
|
||||
return "unknown", "unknown", "unknown"
|
||||
}
|
|
@ -29,27 +29,72 @@ type Collector interface {
|
|||
// collected by this Collector to the provided channel and returns once
|
||||
// the last descriptor has been sent. The sent descriptors fulfill the
|
||||
// consistency and uniqueness requirements described in the Desc
|
||||
// documentation. (It is valid if one and the same Collector sends
|
||||
// duplicate descriptors. Those duplicates are simply ignored. However,
|
||||
// two different Collectors must not send duplicate descriptors.) This
|
||||
// method idempotently sends the same descriptors throughout the
|
||||
// lifetime of the Collector. If a Collector encounters an error while
|
||||
// executing this method, it must send an invalid descriptor (created
|
||||
// with NewInvalidDesc) to signal the error to the registry.
|
||||
// documentation.
|
||||
//
|
||||
// It is valid if one and the same Collector sends duplicate
|
||||
// descriptors. Those duplicates are simply ignored. However, two
|
||||
// different Collectors must not send duplicate descriptors.
|
||||
//
|
||||
// Sending no descriptor at all marks the Collector as “unchecked”,
|
||||
// i.e. no checks will be performed at registration time, and the
|
||||
// Collector may yield any Metric it sees fit in its Collect method.
|
||||
//
|
||||
// This method idempotently sends the same descriptors throughout the
|
||||
// lifetime of the Collector. It may be called concurrently and
|
||||
// therefore must be implemented in a concurrency safe way.
|
||||
//
|
||||
// If a Collector encounters an error while executing this method, it
|
||||
// must send an invalid descriptor (created with NewInvalidDesc) to
|
||||
// signal the error to the registry.
|
||||
Describe(chan<- *Desc)
|
||||
// Collect is called by the Prometheus registry when collecting
|
||||
// metrics. The implementation sends each collected metric via the
|
||||
// provided channel and returns once the last metric has been sent. The
|
||||
// descriptor of each sent metric is one of those returned by
|
||||
// Describe. Returned metrics that share the same descriptor must differ
|
||||
// in their variable label values. This method may be called
|
||||
// concurrently and must therefore be implemented in a concurrency safe
|
||||
// way. Blocking occurs at the expense of total performance of rendering
|
||||
// all registered metrics. Ideally, Collector implementations support
|
||||
// concurrent readers.
|
||||
// descriptor of each sent metric is one of those returned by Describe
|
||||
// (unless the Collector is unchecked, see above). Returned metrics that
|
||||
// share the same descriptor must differ in their variable label
|
||||
// values.
|
||||
//
|
||||
// This method may be called concurrently and must therefore be
|
||||
// implemented in a concurrency safe way. Blocking occurs at the expense
|
||||
// of total performance of rendering all registered metrics. Ideally,
|
||||
// Collector implementations support concurrent readers.
|
||||
Collect(chan<- Metric)
|
||||
}
|
||||
|
||||
// DescribeByCollect is a helper to implement the Describe method of a custom
|
||||
// Collector. It collects the metrics from the provided Collector and sends
|
||||
// their descriptors to the provided channel.
|
||||
//
|
||||
// If a Collector collects the same metrics throughout its lifetime, its
|
||||
// Describe method can simply be implemented as:
|
||||
//
|
||||
// func (c customCollector) Describe(ch chan<- *Desc) {
|
||||
// DescribeByCollect(c, ch)
|
||||
// }
|
||||
//
|
||||
// However, this will not work if the metrics collected change dynamically over
|
||||
// the lifetime of the Collector in a way that their combined set of descriptors
|
||||
// changes as well. The shortcut implementation will then violate the contract
|
||||
// of the Describe method. If a Collector sometimes collects no metrics at all
|
||||
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
|
||||
// metrics after a metric with a fully specified label set has been accessed),
|
||||
// it might even get registered as an unchecked Collector (cf. the Register
|
||||
// method of the Registerer interface). Hence, only use this shortcut
|
||||
// implementation of Describe if you are certain to fulfill the contract.
|
||||
//
|
||||
// The Collector example demonstrates a use of DescribeByCollect.
|
||||
func DescribeByCollect(c Collector, descs chan<- *Desc) {
|
||||
metrics := make(chan Metric)
|
||||
go func() {
|
||||
c.Collect(metrics)
|
||||
close(metrics)
|
||||
}()
|
||||
for m := range metrics {
|
||||
descs <- m.Desc()
|
||||
}
|
||||
}
|
||||
|
||||
// selfCollector implements Collector for a single Metric so that the Metric
|
||||
// collects itself. Add it as an anonymous field to a struct that implements
|
||||
// Metric, and call init with the Metric itself as an argument.
|
||||
|
|
|
@ -15,6 +15,11 @@ package prometheus
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// Counter is a Metric that represents a single numerical value that only ever
|
||||
|
@ -30,26 +35,42 @@ type Counter interface {
|
|||
Metric
|
||||
Collector
|
||||
|
||||
// Set is used to set the Counter to an arbitrary value. It is only used
|
||||
// if you have to transfer a value from an external counter into this
|
||||
// Prometheus metric. Do not use it for regular handling of a
|
||||
// Prometheus counter (as it can be used to break the contract of
|
||||
// monotonically increasing values).
|
||||
//
|
||||
// Deprecated: Use NewConstMetric to create a counter for an external
|
||||
// value. A Counter should never be set.
|
||||
Set(float64)
|
||||
// Inc increments the counter by 1.
|
||||
// Inc increments the counter by 1. Use Add to increment it by arbitrary
|
||||
// non-negative values.
|
||||
Inc()
|
||||
// Add adds the given value to the counter. It panics if the value is <
|
||||
// 0.
|
||||
Add(float64)
|
||||
}
|
||||
|
||||
// ExemplarAdder is implemented by Counters that offer the option of adding a
// value to the Counter together with an exemplar. Its AddWithExemplar method
// works like the Add method of the Counter interface but also replaces the
// currently saved exemplar (if any) with a new one, created from the provided
// value, the current time as timestamp, and the provided labels. Empty Labels
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
// than 64 runes in total.
type ExemplarAdder interface {
	AddWithExemplar(value float64, exemplar Labels)
}
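Per the NewCounter documentation further down in this file, the concrete counter returned by NewCounter also implements ExemplarAdder, so callers can type-assert to attach an exemplar. A usage sketch (the metric name and trace-ID label are illustrative only):

```go
package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    requests := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "http_requests_total",
        Help: "Total HTTP requests.",
    })

    // The value returned by NewCounter also implements ExemplarAdder.
    if ea, ok := requests.(prometheus.ExemplarAdder); ok {
        ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
    }
    fmt.Println("recorded one request with an exemplar")
}
```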
|
||||
// CounterOpts is an alias for Opts. See there for doc comments.
|
||||
type CounterOpts Opts
|
||||
|
||||
// NewCounter creates a new Counter based on the provided CounterOpts.
|
||||
//
|
||||
// The returned implementation also implements ExemplarAdder. It is safe to
|
||||
// perform the corresponding type assertion.
|
||||
//
|
||||
// The returned implementation tracks the counter value in two separate
|
||||
// variables, a float64 and a uint64. The latter is used to track calls of the
|
||||
// Inc method and calls of the Add method with a value that can be represented
|
||||
// as a uint64. This allows atomic increments of the counter with optimal
|
||||
// performance. (It is common to have an Inc call in very hot execution paths.)
|
||||
// Both internal tracking values are added up in the Write method. This has to
|
||||
// be taken into account when it comes to precision and overflow behavior.
|
||||
func NewCounter(opts CounterOpts) Counter {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
|
@ -57,20 +78,83 @@ func NewCounter(opts CounterOpts) Counter {
|
|||
nil,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
||||
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}
|
||||
|
||||
type counter struct {
|
||||
value
|
||||
// valBits contains the bits of the represented float64 value, while
|
||||
// valInt stores values that are exact integers. Both have to go first
|
||||
// in the struct to guarantee alignment for atomic operations.
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
valBits uint64
|
||||
valInt uint64
|
||||
|
||||
selfCollector
|
||||
desc *Desc
|
||||
|
||||
labelPairs []*dto.LabelPair
|
||||
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
|
||||
|
||||
now func() time.Time // To mock out time.Now() for testing.
|
||||
}
|
||||
|
||||
func (c *counter) Desc() *Desc {
|
||||
return c.desc
|
||||
}
|
||||
|
||||
func (c *counter) Add(v float64) {
|
||||
if v < 0 {
|
||||
panic(errors.New("counter cannot decrease in value"))
|
||||
}
|
||||
c.value.Add(v)
|
||||
|
||||
ival := uint64(v)
|
||||
if float64(ival) == v {
|
||||
atomic.AddUint64(&c.valInt, ival)
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&c.valBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *counter) AddWithExemplar(v float64, e Labels) {
|
||||
c.Add(v)
|
||||
c.updateExemplar(v, e)
|
||||
}
|
||||
|
||||
func (c *counter) Inc() {
|
||||
atomic.AddUint64(&c.valInt, 1)
|
||||
}
|
||||
|
||||
func (c *counter) Write(out *dto.Metric) error {
|
||||
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
|
||||
ival := atomic.LoadUint64(&c.valInt)
|
||||
val := fval + float64(ival)
|
||||
|
||||
var exemplar *dto.Exemplar
|
||||
if e := c.exemplar.Load(); e != nil {
|
||||
exemplar = e.(*dto.Exemplar)
|
||||
}
|
||||
|
||||
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
|
||||
}
|
||||
|
||||
func (c *counter) updateExemplar(v float64, l Labels) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
e, err := newExemplar(v, c.now(), l)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
c.exemplar.Store(e)
|
||||
}
|
||||
|
||||
// CounterVec is a Collector that bundles a set of Counters that all share the
|
||||
|
@ -78,16 +162,12 @@ func (c *counter) Add(v float64) {
|
|||
// if you want to count the same thing partitioned by various dimensions
|
||||
// (e.g. number of HTTP requests, partitioned by response code and
|
||||
// method). Create instances with NewCounterVec.
|
||||
//
|
||||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
||||
// detailed documentation.
|
||||
type CounterVec struct {
|
||||
*MetricVec
|
||||
*metricVec
|
||||
}
|
||||
|
||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
// partitioned by the given label names.
|
||||
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
|
@ -96,34 +176,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &CounterVec{
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
result := &counter{value: value{
|
||||
desc: desc,
|
||||
valType: CounterValue,
|
||||
labelPairs: makeLabelPairs(desc, lvs),
|
||||
}}
|
||||
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
if len(lvs) != len(desc.variableLabels) {
|
||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
|
||||
}
|
||||
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Counter and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
// GetMetricWithLabelValues returns the Counter for the given slice of label
|
||||
// values (same order as the VariableLabels in Desc). If that combination of
|
||||
// label values is accessed for the first time, a new Counter is created.
|
||||
//
|
||||
// It is possible to call this method without using the returned Counter to only
|
||||
// create the new Counter but leave it at its starting value 0. See also the
|
||||
// SummaryVec example.
|
||||
//
|
||||
// Keeping the Counter for later use is possible (and should be considered if
|
||||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||
// Delete can be used to delete the Counter from the CounterVec. In that case,
|
||||
// the Counter will still exist, but it will not be exported anymore, even if a
|
||||
// Counter with the same label values is created later.
|
||||
//
|
||||
// An error is returned if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
|
||||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Counter), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Counter and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
// GetMetricWith returns the Counter for the given Labels map (the label names
|
||||
// must match those of the VariableLabels in Desc). If that label map is
|
||||
// accessed for the first time, a new Counter is created. Implications of
|
||||
// creating a Counter without using it and keeping the Counter for later use are
|
||||
// the same as for GetMetricWithLabelValues.
|
||||
//
|
||||
// An error is returned if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// This method is used for the same purpose as
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
|
||||
metric, err := v.metricVec.getMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Counter), err
|
||||
}
|
||||
|
@ -131,18 +239,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
|
|||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||
// error allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Counter)
|
||||
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
|
||||
c, err := v.GetMetricWithLabelValues(lvs...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return c
|
||||
}

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *CounterVec) With(labels Labels) Counter {
	return m.MetricVec.With(labels).(Counter)
// returned an error. Not returning an error allows shortcuts like
//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
	c, err := v.GetMetricWith(labels)
	if err != nil {
		panic(err)
	}
	return c
}

// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence – which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the CounterVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
	vec, err := v.curryWith(labels)
	if vec != nil {
		return &CounterVec{vec}, err
	}
	return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
	vec, err := v.CurryWith(labels)
	if err != nil {
		panic(err)
	}
	return vec
}
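As an illustration of the currying behaviour described above, the following hypothetical sketch (not part of this diff) pre-sets one label and keeps using the same underlying counters; the metric and label names are made up for the example.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "api_requests_total",
			Help: "API requests, partitioned by service, code, and method.",
		},
		[]string{"service", "code", "method"},
	)
	prometheus.MustRegister(requests) // Register only the uncurried vector.

	// Pre-set the "service" label; the curried vector now takes two values,
	// in the original order minus the curried label.
	authReqs := requests.MustCurryWith(prometheus.Labels{"service": "auth"})
	authReqs.WithLabelValues("200", "GET").Inc()

	// Both vectors share the same underlying counters.
	requests.WithLabelValues("auth", "200", "GET").Inc() // Same child as above.
}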

// CounterFunc is a Counter whose value is determined at collect time by calling a

@@ -162,6 +309,8 @@ type CounterFunc interface {
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
//
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
	return newValueFunc(NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
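For context, a hypothetical sketch of a CounterFunc that mirrors an externally maintained, monotonically increasing value at collect time; the wrapped variable and metric name are assumptions made for this illustration.

package main

import (
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// An externally maintained, monotonically increasing value, e.g. kept
	// by a library that cannot be instrumented directly.
	var handledJobs int64

	// The callback must be concurrency-safe and must never report a
	// decreasing value, per the Counter contract quoted above.
	jobsTotal := prometheus.NewCounterFunc(prometheus.CounterOpts{
		Name: "jobs_handled_total",
		Help: "Total number of handled jobs, read from an external counter.",
	}, func() float64 {
		return float64(atomic.LoadInt64(&handledJobs))
	})
	prometheus.MustRegister(jobsTotal)

	atomic.AddInt64(&handledJobs, 1) // Simulate work being done elsewhere.
}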
|
||||
|
|
|
@ -16,33 +16,16 @@ package prometheus
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
var (
|
||||
metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
|
||||
labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||
)
|
||||
|
||||
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
||||
// label names.
|
||||
const reservedLabelPrefix = "__"
|
||||
|
||||
// Labels represents a collection of label name -> value mappings. This type is
|
||||
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
|
||||
// metric vector Collectors, e.g.:
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
//
|
||||
// The other use-case is the specification of constant label pairs in Opts or to
|
||||
// create a Desc.
|
||||
type Labels map[string]string
|
||||
|
||||
// Desc is the descriptor used by every Prometheus Metric. It is essentially
|
||||
// the immutable meta-data of a Metric. The normal Metric implementations
|
||||
// included in this package manage their Desc under the hood. Users only have to
|
||||
|
@ -78,32 +61,27 @@ type Desc struct {
|
|||
// Help string. Each Desc with the same fqName must have the same
|
||||
// dimHash.
|
||||
dimHash uint64
|
||||
// err is an error that occured during construction. It is reported on
|
||||
// err is an error that occurred during construction. It is reported on
|
||||
// registration time.
|
||||
err error
|
||||
}
|
||||
|
||||
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
||||
// and will be reported on registration time. variableLabels and constLabels can
|
||||
// be nil if no such labels should be set. fqName and help must not be empty.
|
||||
// be nil if no such labels should be set. fqName must not be empty.
|
||||
//
|
||||
// variableLabels only contain the label names. Their label values are variable
|
||||
// and therefore not part of the Desc. (They are managed within the Metric.)
|
||||
//
|
||||
// For constLabels, the label values are constant. Therefore, they are fully
|
||||
// specified in the Desc. See the Opts documentation for the implications of
|
||||
// constant labels.
|
||||
// specified in the Desc. See the Collector example for a usage pattern.
|
||||
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
|
||||
d := &Desc{
|
||||
fqName: fqName,
|
||||
help: help,
|
||||
variableLabels: variableLabels,
|
||||
}
|
||||
if help == "" {
|
||||
d.err = errors.New("empty help string")
|
||||
return d
|
||||
}
|
||||
if !metricNameRE.MatchString(fqName) {
|
||||
if !model.IsValidMetricName(model.LabelValue(fqName)) {
|
||||
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
||||
return d
|
||||
}
|
||||
|
@ -116,7 +94,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
|||
// First add only the const label names and sort them...
|
||||
for labelName := range constLabels {
|
||||
if !checkLabelName(labelName) {
|
||||
d.err = fmt.Errorf("%q is not a valid label name", labelName)
|
||||
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
|
||||
return d
|
||||
}
|
||||
labelNames = append(labelNames, labelName)
|
||||
|
@ -127,12 +105,18 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
|||
for _, labelName := range labelNames {
|
||||
labelValues = append(labelValues, constLabels[labelName])
|
||||
}
|
||||
// Validate the const label values. They can't have a wrong cardinality, so
|
||||
// use in len(labelValues) as expectedNumberOfValues.
|
||||
if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
|
||||
d.err = err
|
||||
return d
|
||||
}
|
||||
// Now add the variable label names, but prefix them with something that
|
||||
// cannot be in a regular label name. That prevents matching the label
|
||||
// dimension with a different mix between preset and variable labels.
|
||||
for _, labelName := range variableLabels {
|
||||
if !checkLabelName(labelName) {
|
||||
d.err = fmt.Errorf("%q is not a valid label name", labelName)
|
||||
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
|
||||
return d
|
||||
}
|
||||
labelNames = append(labelNames, "$"+labelName)
|
||||
|
@ -142,24 +126,25 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
|||
d.err = errors.New("duplicate label names")
|
||||
return d
|
||||
}
|
||||
vh := hashNew()
|
||||
|
||||
xxh := xxhash.New()
|
||||
for _, val := range labelValues {
|
||||
vh = hashAdd(vh, val)
|
||||
vh = hashAddByte(vh, separatorByte)
|
||||
xxh.WriteString(val)
|
||||
xxh.Write(separatorByteSlice)
|
||||
}
|
||||
d.id = vh
|
||||
d.id = xxh.Sum64()
|
||||
// Sort labelNames so that order doesn't matter for the hash.
|
||||
sort.Strings(labelNames)
|
||||
// Now hash together (in this order) the help string and the sorted
|
||||
// label names.
|
||||
lh := hashNew()
|
||||
lh = hashAdd(lh, help)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
xxh.Reset()
|
||||
xxh.WriteString(help)
|
||||
xxh.Write(separatorByteSlice)
|
||||
for _, labelName := range labelNames {
|
||||
lh = hashAdd(lh, labelName)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
xxh.WriteString(labelName)
|
||||
xxh.Write(separatorByteSlice)
|
||||
}
|
||||
d.dimHash = lh
|
||||
d.dimHash = xxh.Sum64()
|
||||
|
||||
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
|
||||
for n, v := range constLabels {
|
||||
|
@ -168,7 +153,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
|||
Value: proto.String(v),
|
||||
})
|
||||
}
|
||||
sort.Sort(LabelPairSorter(d.constLabelPairs))
|
||||
sort.Sort(labelPairSorter(d.constLabelPairs))
|
||||
return d
|
||||
}
|
||||
|
||||
|
@ -198,8 +183,3 @@ func (d *Desc) String() string {
|
|||
d.variableLabels,
|
||||
)
|
||||
}
|
||||
|
||||
func checkLabelName(l string) bool {
|
||||
return labelNameRE.MatchString(l) &&
|
||||
!strings.HasPrefix(l, reservedLabelPrefix)
|
||||
}
|
||||
|
|
|
@ -11,10 +11,12 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package prometheus provides metrics primitives to instrument code for
|
||||
// monitoring. It also offers a registry for metrics. Sub-packages allow to
|
||||
// expose the registered metrics via HTTP (package promhttp) or push them to a
|
||||
// Pushgateway (package push).
|
||||
// Package prometheus is the core instrumentation package. It provides metrics
|
||||
// primitives to instrument code for monitoring. It also offers a registry for
|
||||
// metrics. Sub-packages allow to expose the registered metrics via HTTP
|
||||
// (package promhttp) or push them to a Pushgateway (package push). There is
|
||||
// also a sub-package promauto, which provides metrics constructors with
|
||||
// automatic registration.
|
||||
//
|
||||
// All exported functions and methods are safe to be used concurrently unless
|
||||
// specified otherwise.
|
||||
|
@ -26,6 +28,7 @@
|
|||
// package main
|
||||
//
|
||||
// import (
|
||||
// "log"
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
|
@ -59,7 +62,7 @@
|
|||
// // The Handler function provides a default handler to expose metrics
|
||||
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
||||
// http.Handle("/metrics", promhttp.Handler())
|
||||
// http.ListenAndServe(":8080", nil)
|
||||
// log.Fatal(http.ListenAndServe(":8080", nil))
|
||||
// }
|
||||
//
|
||||
//
|
||||
|
@@ -69,34 +72,33 @@
// Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. Hovever, in addition to the basic plumbing shown in the example
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
// vector versions for basic usage. Furthermore, if you are not concerned with
// fine-grained control of when and how to register metrics with the registry,
// have a look at the promauto package, which will effectively allow you to
// ignore registration altogether in simple cases.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// HistogramVec, and UntypedVec.
// and HistogramVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
// and HistogramVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
// HistogramOpts, or UntypedOpts.
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
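The paragraph above maps onto very little code; the following hypothetical sketch (not part of the vendored source) creates one metric via its Opts struct with explicit registration and one via the promauto shortcut the documentation mentions. Names and bucket choices are illustrative.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	// Explicit construction; registered in main below.
	queueDepth = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "worker_queue_depth",
		Help: "Current number of queued jobs.",
	})

	// promauto registers the metric with the default registerer as a side
	// effect of construction.
	jobDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Name:    "job_duration_seconds",
		Help:    "Job duration in seconds.",
		Buckets: prometheus.DefBuckets,
	})
)

func main() {
	prometheus.MustRegister(queueDepth)

	queueDepth.Set(3)
	jobDuration.Observe(0.42)
}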
// Custom Collectors and constant Metrics
//
@@ -112,10 +114,23 @@
// existing numbers into Prometheus Metrics during collection. An own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances.
// NewConstSummary (and their respective Must… versions). NewConstMetric is used
// for all metric types with just a float64 as their value: Counter, Gauge, and
// a special “type” called Untyped. Use the latter if you are not sure if the
// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
// happens in the Collect method. The Describe method has to return separate
// Desc instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances. Alternatively,
// you could return no Desc at all, which will mark the Collector “unchecked”.
// No checks are performed at registration time, but metric consistency will
// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
//
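As a sketch of the mirroring pattern described above (with assumed names, and a placeholder source of the mirrored value), a custom Collector can announce its Desc in Describe and emit a throw-away constant metric in Collect:

package main

import "github.com/prometheus/client_golang/prometheus"

// cacheCollector mirrors a value owned by some other subsystem.
type cacheCollector struct {
	entriesDesc *prometheus.Desc
	readEntries func() float64 // Supplied by the mirrored subsystem.
}

func (c *cacheCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.entriesDesc // Returning no Desc at all would make the collector "unchecked".
}

func (c *cacheCollector) Collect(ch chan<- prometheus.Metric) {
	// A throw-away metric is created on every scrape.
	ch <- prometheus.MustNewConstMetric(
		c.entriesDesc, prometheus.GaugeValue, c.readEntries(),
	)
}

func main() {
	c := &cacheCollector{
		entriesDesc: prometheus.NewDesc(
			"cache_entries",
			"Current number of entries in the cache.",
			nil, nil,
		),
		readEntries: func() float64 { return 42 }, // Placeholder source.
	}
	prometheus.MustRegister(c)
}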
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
|
||||
|
@ -129,34 +144,34 @@
|
|||
// Advanced Uses of the Registry
|
||||
//
|
||||
// While MustRegister is the by far most common way of registering a Collector,
|
||||
// sometimes you might want to handle the errors the registration might
|
||||
// cause. As suggested by the name, MustRegister panics if an error occurs. With
|
||||
// the Register function, the error is returned and can be handled.
|
||||
// sometimes you might want to handle the errors the registration might cause.
|
||||
// As suggested by the name, MustRegister panics if an error occurs. With the
|
||||
// Register function, the error is returned and can be handled.
|
||||
//
|
||||
// An error is returned if the registered Collector is incompatible or
|
||||
// inconsistent with already registered metrics. The registry aims for
|
||||
// consistency of the collected metrics according to the Prometheus data
|
||||
// model. Inconsistencies are ideally detected at registration time, not at
|
||||
// collect time. The former will usually be detected at start-up time of a
|
||||
// program, while the latter will only happen at scrape time, possibly not even
|
||||
// on the first scrape if the inconsistency only becomes relevant later. That is
|
||||
// the main reason why a Collector and a Metric have to describe themselves to
|
||||
// the registry.
|
||||
// consistency of the collected metrics according to the Prometheus data model.
|
||||
// Inconsistencies are ideally detected at registration time, not at collect
|
||||
// time. The former will usually be detected at start-up time of a program,
|
||||
// while the latter will only happen at scrape time, possibly not even on the
|
||||
// first scrape if the inconsistency only becomes relevant later. That is the
|
||||
// main reason why a Collector and a Metric have to describe themselves to the
|
||||
// registry.
|
||||
//
|
||||
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you
// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in
// the same way on a custom registry as the global functions Register and
// Unregister on the default registry.
// Gatherer interfaces yourself. The methods Register and Unregister work in the
// same way on a custom registry as the global functions Register and Unregister
// on the default registry.
//
// There are a number of uses for custom registries: You can use registries
// with special properties, see NewPedanticRegistry. You can avoid global state,
// as it is imposed by the DefaultRegistry. You can use multiple registries at
// There are a number of uses for custom registries: You can use registries with
// special properties, see NewPedanticRegistry. You can avoid global state, as
// it is imposed by the DefaultRegisterer. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegistry comes registered with a Collector for Go
// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
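A short, hypothetical sketch of the custom-registry workflow described above; the metric name and handler path are illustrative, and promhttp is assumed to be available as in the example at the top of this package documentation.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry starts empty: no Go or process collectors.
	reg := prometheus.NewRegistry()

	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Total requests handled by myapp.",
	})

	// Register (unlike MustRegister) returns the error so it can be handled.
	if err := reg.Register(requests); err != nil {
		log.Fatalf("registration failed: %v", err)
	}

	// Expose only the metrics gathered by this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}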
|
||||
|
@ -166,16 +181,19 @@
|
|||
// The Registry implements the Gatherer interface. The caller of the Gather
|
||||
// method can then expose the gathered metrics in some way. Usually, the metrics
|
||||
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
||||
// above. The tools to expose metrics via HTTP are in the promhttp
|
||||
// sub-package. (The top-level functions in the prometheus package are
|
||||
// deprecated.)
|
||||
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
|
||||
//
|
||||
// Pushing to the Pushgateway
|
||||
//
|
||||
// Function for pushing to the Pushgateway can be found in the push sub-package.
|
||||
//
|
||||
// Graphite Bridge
|
||||
//
|
||||
// Functions and examples to push metrics from a Gatherer to Graphite can be
|
||||
// found in the graphite sub-package.
|
||||
//
|
||||
// Other Means of Exposition
|
||||
//
|
||||
// More ways of exposing metrics can easily be added. Sending metrics to
|
||||
// Graphite would be an example that will soon be implemented.
|
||||
// More ways of exposing metrics can easily be added by following the approaches
|
||||
// of the existing implementations.
|
||||
package prometheus
|
||||
|
|
|
@ -1,3 +1,16 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
||||
|
|
|
@ -13,6 +13,14 @@
|
|||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// Gauge is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
|
@ -27,29 +35,95 @@ type Gauge interface {
|
|||
|
||||
// Set sets the Gauge to an arbitrary value.
|
||||
Set(float64)
|
||||
// Inc increments the Gauge by 1.
|
||||
// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
|
||||
// values.
|
||||
Inc()
|
||||
// Dec decrements the Gauge by 1.
|
||||
// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
|
||||
// values.
|
||||
Dec()
|
||||
// Add adds the given value to the Gauge. (The value can be
|
||||
// negative, resulting in a decrease of the Gauge.)
|
||||
// Add adds the given value to the Gauge. (The value can be negative,
|
||||
// resulting in a decrease of the Gauge.)
|
||||
Add(float64)
|
||||
// Sub subtracts the given value from the Gauge. (The value can be
|
||||
// negative, resulting in an increase of the Gauge.)
|
||||
Sub(float64)
|
||||
|
||||
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
|
||||
SetToCurrentTime()
|
||||
}
|
||||
|
||||
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts

// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
// the former. For example, the Inc method of the returned Gauge is slower than
// the Inc method of a Counter returned by NewCounter. This matches the typical
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge {
	return newValue(NewDesc(
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	), GaugeValue, 0)
	)
	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
	result.init(result) // Init self-collection.
	return result
}
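A small usage sketch for the gauge constructed above (not part of the vendored source; metric names are illustrative), exercising the Set-heavy style the comment recommends alongside Inc/Dec and SetToCurrentTime:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "requests_in_flight",
		Help: "Number of requests currently being served.",
	})
	lastStartup := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "last_startup_timestamp_seconds",
		Help: "Unix time at which the process last started.",
	})
	prometheus.MustRegister(inFlight, lastStartup)

	lastStartup.SetToCurrentTime() // Unix time in seconds, as a float64.

	inFlight.Inc()   // A request started.
	inFlight.Dec()   // The request finished.
	inFlight.Set(12) // Or overwrite the value directly; Set is the fastest path here.
}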
|
||||
|
||||
type gauge struct {
|
||||
// valBits contains the bits of the represented float64 value. It has
|
||||
// to go first in the struct to guarantee alignment for atomic
|
||||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
valBits uint64
|
||||
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
func (g *gauge) Desc() *Desc {
|
||||
return g.desc
|
||||
}
|
||||
|
||||
func (g *gauge) Set(val float64) {
|
||||
atomic.StoreUint64(&g.valBits, math.Float64bits(val))
|
||||
}
|
||||
|
||||
func (g *gauge) SetToCurrentTime() {
|
||||
g.Set(float64(time.Now().UnixNano()) / 1e9)
|
||||
}
|
||||
|
||||
func (g *gauge) Inc() {
|
||||
g.Add(1)
|
||||
}
|
||||
|
||||
func (g *gauge) Dec() {
|
||||
g.Add(-1)
|
||||
}
|
||||
|
||||
func (g *gauge) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&g.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
			return
		}
	}
}

func (g *gauge) Sub(val float64) {
	g.Add(val * -1)
}

func (g *gauge) Write(out *dto.Metric) error {
	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
	return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
}
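The compare-and-swap loop in Add above is what makes concurrent Inc/Dec/Add/Sub calls safe without a mutex. A small, hypothetical sketch demonstrating that property; it assumes the testutil sub-package of client_golang is available (it ships upstream but may not be vendored in this repository), and the expected final value follows from 1000 goroutines each adding a net +2.

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "queue_depth",
		Help: "Current number of items in the queue.",
	})

	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			queueDepth.Inc()  // Each Inc/Add/Sub is a lock-free CAS update.
			queueDepth.Add(2)
			queueDepth.Sub(1)
		}()
	}
	wg.Wait()

	// testutil.ToFloat64 collects the gauge and returns its current value.
	fmt.Println(testutil.ToFloat64(queueDepth)) // 2000
}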
|
||||
|
||||
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
|
||||
|
@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge {
|
|||
// (e.g. number of operations queued, partitioned by user and operation
|
||||
// type). Create instances with NewGaugeVec.
|
||||
type GaugeVec struct {
|
||||
*MetricVec
|
||||
*metricVec
|
||||
}
|
||||
|
||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
// partitioned by the given label names.
|
||||
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
|
@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &GaugeVec{
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
if len(lvs) != len(desc.variableLabels) {
|
||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
|
||||
}
|
||||
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Gauge and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
// GetMetricWithLabelValues returns the Gauge for the given slice of label
|
||||
// values (same order as the VariableLabels in Desc). If that combination of
|
||||
// label values is accessed for the first time, a new Gauge is created.
|
||||
//
|
||||
// It is possible to call this method without using the returned Gauge to only
|
||||
// create the new Gauge but leave it at its starting value 0. See also the
|
||||
// SummaryVec example.
|
||||
//
|
||||
// Keeping the Gauge for later use is possible (and should be considered if
|
||||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||
// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
|
||||
// Gauge will still exist, but it will not be exported anymore, even if a
|
||||
// Gauge with the same label values is created later. See also the CounterVec
|
||||
// example.
|
||||
//
|
||||
// An error is returned if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
|
||||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Gauge), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Gauge and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
// GetMetricWith returns the Gauge for the given Labels map (the label names
|
||||
// must match those of the VariableLabels in Desc). If that label map is
|
||||
// accessed for the first time, a new Gauge is created. Implications of
|
||||
// creating a Gauge without using it and keeping the Gauge for later use are
|
||||
// the same as for GetMetricWithLabelValues.
|
||||
//
|
||||
// An error is returned if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// This method is used for the same purpose as
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
||||
metric, err := v.metricVec.getMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Gauge), err
|
||||
}
|
||||
|
@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
|||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||
// error allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Gauge)
|
||||
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
||||
g, err := v.GetMetricWithLabelValues(lvs...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (m *GaugeVec) With(labels Labels) Gauge {
|
||||
return m.MetricVec.With(labels).(Gauge)
|
||||
// returned an error. Not returning an error allows shortcuts like
|
||||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (v *GaugeVec) With(labels Labels) Gauge {
|
||||
g, err := v.GetMetricWith(labels)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// CurryWith returns a vector curried with the provided labels, i.e. the
|
||||
// returned vector has those labels pre-set for all labeled operations performed
|
||||
// on it. The cardinality of the curried vector is reduced accordingly. The
|
||||
// order of the remaining labels stays the same (just with the curried labels
|
||||
// taken out of the sequence – which is relevant for the
|
||||
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
|
||||
// vector, but only with labels not yet used for currying before.
|
||||
//
|
||||
// The metrics contained in the GaugeVec are shared between the curried and
|
||||
// uncurried vectors. They are just accessed differently. Curried and uncurried
|
||||
// vectors behave identically in terms of collection. Only one must be
|
||||
// registered with a given registry (usually the uncurried version). The Reset
|
||||
// method deletes all metrics, even if called on a curried vector.
|
||||
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
|
||||
vec, err := v.curryWith(labels)
|
||||
if vec != nil {
|
||||
return &GaugeVec{vec}, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// MustCurryWith works as CurryWith but panics where CurryWith would have
|
||||
// returned an error.
|
||||
func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
|
||||
vec, err := v.CurryWith(labels)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return vec
|
||||
}
|
||||
|
||||
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
|
||||
|
@ -127,9 +273,12 @@ type GaugeFunc interface {
|
|||
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
|
||||
// value reported is determined by calling the given function from within the
|
||||
// Write method. Take into account that metric collection may happen
|
||||
// concurrently. If that results in concurrent calls to Write, like in the case
|
||||
// where a GaugeFunc is directly registered with Prometheus, the provided
|
||||
// function must be concurrency-safe.
|
||||
// concurrently. Therefore, it must be safe to call the provided function
|
||||
// concurrently.
|
||||
//
|
||||
// NewGaugeFunc is a good way to create an “info” style metric with a constant
|
||||
// value of 1. Example:
|
||||
// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
|
||||
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
|
||||
return newValueFunc(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
|
|
|
@ -1,34 +1,89 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type goCollector struct {
|
||||
goroutines Gauge
|
||||
goroutinesDesc *Desc
|
||||
threadsDesc *Desc
|
||||
gcDesc *Desc
|
||||
goInfoDesc *Desc
|
||||
|
||||
// metrics to describe and collect
|
||||
metrics memStatsMetrics
|
||||
// ms... are memstats related.
|
||||
msLast *runtime.MemStats // Previously collected memstats.
|
||||
msLastTimestamp time.Time
|
||||
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
|
||||
msMetrics memStatsMetrics
|
||||
msRead func(*runtime.MemStats) // For mocking in tests.
|
||||
msMaxWait time.Duration // Wait time for fresh memstats.
|
||||
msMaxAge time.Duration // Maximum allowed age of old memstats.
|
||||
}
|
||||
|
||||
// NewGoCollector returns a collector which exports metrics about the current
|
||||
// go process.
|
||||
// NewGoCollector returns a collector that exports metrics about the current Go
|
||||
// process. This includes memory stats. To collect those, runtime.ReadMemStats
|
||||
// is called. This requires to “stop the world”, which usually only happens for
|
||||
// garbage collection (GC). Take the following implications into account when
|
||||
// deciding whether to use the Go collector:
|
||||
//
|
||||
// 1. The performance impact of stopping the world is the more relevant the more
|
||||
// frequently metrics are collected. However, with Go1.9 or later the
|
||||
// stop-the-world time per metrics collection is very short (~25µs) so that the
|
||||
// performance impact will only matter in rare cases. However, with older Go
|
||||
// versions, the stop-the-world duration depends on the heap size and can be
|
||||
// quite significant (~1.7 ms/GiB as per
|
||||
// https://go-review.googlesource.com/c/go/+/34937).
|
||||
//
|
||||
// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
|
||||
// metrics collection happens to coincide with GC, it will only complete after
|
||||
// GC has finished. Usually, GC is fast enough to not cause problems. However,
|
||||
// with a very large heap, GC might take multiple seconds, which is enough to
|
||||
// cause scrape timeouts in common setups. To avoid this problem, the Go
|
||||
// collector will use the memstats from a previous collection if
|
||||
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
|
||||
// collected memstats, or their collection is more than 5m ago, the collection
|
||||
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
|
||||
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
|
||||
// issue.)
|
||||
func NewGoCollector() Collector {
|
||||
return &goCollector{
|
||||
goroutines: NewGauge(GaugeOpts{
|
||||
Namespace: "go",
|
||||
Name: "goroutines",
|
||||
Help: "Number of goroutines that currently exist.",
|
||||
}),
|
||||
goroutinesDesc: NewDesc(
|
||||
"go_goroutines",
|
||||
"Number of goroutines that currently exist.",
|
||||
nil, nil),
|
||||
threadsDesc: NewDesc(
|
||||
"go_threads",
|
||||
"Number of OS threads created.",
|
||||
nil, nil),
|
||||
gcDesc: NewDesc(
|
||||
"go_gc_duration_seconds",
|
||||
"A summary of the GC invocation durations.",
|
||||
"A summary of the pause duration of garbage collection cycles.",
|
||||
nil, nil),
|
||||
metrics: memStatsMetrics{
|
||||
goInfoDesc: NewDesc(
|
||||
"go_info",
|
||||
"Information about the Go environment.",
|
||||
nil, Labels{"version": runtime.Version()}),
|
||||
msLast: &runtime.MemStats{},
|
||||
msRead: runtime.ReadMemStats,
|
||||
msMaxWait: time.Second,
|
||||
msMaxAge: 5 * time.Minute,
|
||||
msMetrics: memStatsMetrics{
|
||||
{
|
||||
desc: NewDesc(
|
||||
memstatNamespace("alloc_bytes"),
|
||||
|
@ -48,7 +103,7 @@ func NewGoCollector() Collector {
|
|||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("sys_bytes"),
|
||||
"Number of bytes obtained by system. Sum of all system allocations.",
|
||||
"Number of bytes obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
|
||||
|
@ -111,12 +166,12 @@ func NewGoCollector() Collector {
|
|||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_released_bytes_total"),
|
||||
"Total number of heap bytes released to OS.",
|
||||
memstatNamespace("heap_released_bytes"),
|
||||
"Number of heap bytes released to OS.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
|
||||
valType: CounterValue,
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_objects"),
|
||||
|
@ -213,29 +268,53 @@ func NewGoCollector() Collector {
|
|||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("gc_cpu_fraction"),
|
||||
"The fraction of this program's available CPU time used by the GC since the program started.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
|
||||
valType: GaugeValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func memstatNamespace(s string) string {
|
||||
return fmt.Sprintf("go_memstats_%s", s)
|
||||
return "go_memstats_" + s
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.goroutines.Desc()
|
||||
ch <- c.goroutinesDesc
|
||||
ch <- c.threadsDesc
|
||||
ch <- c.gcDesc
|
||||
|
||||
for _, i := range c.metrics {
|
||||
ch <- c.goInfoDesc
|
||||
for _, i := range c.msMetrics {
|
||||
ch <- i.desc
|
||||
}
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
func (c *goCollector) Collect(ch chan<- Metric) {
|
||||
c.goroutines.Set(float64(runtime.NumGoroutine()))
|
||||
ch <- c.goroutines
|
||||
var (
|
||||
ms = &runtime.MemStats{}
|
||||
done = make(chan struct{})
|
||||
)
|
||||
// Start reading memstats first as it might take a while.
|
||||
go func() {
|
||||
c.msRead(ms)
|
||||
c.msMtx.Lock()
|
||||
c.msLast = ms
|
||||
c.msLastTimestamp = time.Now()
|
||||
c.msMtx.Unlock()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
|
||||
n, _ := runtime.ThreadCreateProfile(nil)
|
||||
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
|
||||
|
||||
var stats debug.GCStats
|
||||
stats.PauseQuantiles = make([]time.Duration, 5)
|
||||
|
@ -246,11 +325,35 @@ func (c *goCollector) Collect(ch chan<- Metric) {
|
|||
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
|
||||
}
|
||||
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
|
||||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
|
||||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
|
||||
|
||||
ms := &runtime.MemStats{}
|
||||
runtime.ReadMemStats(ms)
|
||||
for _, i := range c.metrics {
|
||||
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
|
||||

	timer := time.NewTimer(c.msMaxWait)
	select {
	case <-done: // Our own ReadMemStats succeeded in time. Use it.
		timer.Stop() // Important for high collection frequencies to not pile up timers.
		c.msCollect(ch, ms)
		return
	case <-timer.C: // Time out, use last memstats if possible. Continue below.
	}
	c.msMtx.Lock()
	if time.Since(c.msLastTimestamp) < c.msMaxAge {
		// Last memstats are recent enough. Collect from them under the lock.
		c.msCollect(ch, c.msLast)
		c.msMtx.Unlock()
		return
	}
	// If we are here, the last memstats are too old or don't exist. We have
	// to wait until our own ReadMemStats finally completes. For that to
	// happen, we have to release the lock.
	c.msMtx.Unlock()
	<-done
	c.msCollect(ch, ms)
}

func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
	for _, i := range c.msMetrics {
		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
	}
}
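Callers need to do nothing special to benefit from the memstats timeout and caching implemented above; registering the collector is enough. A minimal, hypothetical sketch on a custom registry (Gather is only called here to show the collection path end to end):

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// go_* runtime metrics (goroutines, threads, GC pauses, memstats, go_info).
	reg.MustRegister(prometheus.NewGoCollector())
	// go_build_info, populated when the binary is built with Go module support.
	reg.MustRegister(prometheus.NewBuildInfoCollector())

	// Gather triggers Collect on all registered collectors, including the
	// memstats read with the timeout/fallback behaviour shown above.
	mfs, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("gathered %d metric families", len(mfs))
}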
|
||||
|
@ -261,3 +364,33 @@ type memStatsMetrics []struct {
|
|||
eval func(*runtime.MemStats) float64
|
||||
valType ValueType
|
||||
}
|
||||
|
||||
// NewBuildInfoCollector returns a collector collecting a single metric
|
||||
// "go_build_info" with the constant value 1 and three labels "path", "version",
|
||||
// and "checksum". Their label values contain the main module path, version, and
|
||||
// checksum, respectively. The labels will only have meaningful values if the
|
||||
// binary is built with Go module support and from source code retrieved from
|
||||
// the source repository (rather than the local file system). This is usually
|
||||
// accomplished by building from outside of GOPATH, specifying the full address
|
||||
// of the main package, e.g. "GO111MODULE=on go run
|
||||
// github.com/prometheus/client_golang/examples/random". If built without Go
|
||||
// module support, all label values will be "unknown". If built with Go module
|
||||
// support but using the source code from the local file system, the "path" will
|
||||
// be set appropriately, but "checksum" will be empty and "version" will be
|
||||
// "(devel)".
|
||||
//
|
||||
// This collector uses only the build information for the main module. See
|
||||
// https://github.com/povilasv/prommod for an example of a collector for the
|
||||
// module dependencies.
|
||||
func NewBuildInfoCollector() Collector {
|
||||
path, version, sum := readBuildInfo()
|
||||
c := &selfCollector{MustNewConstMetric(
|
||||
NewDesc(
|
||||
"go_build_info",
|
||||
"Build information about the main Go module.",
|
||||
nil, Labels{"path": path, "version": version, "checksum": sum},
|
||||
),
|
||||
GaugeValue, 1)}
|
||||
c.init(c.self)
|
||||
return c
|
||||
}
|
||||
|
|
|
@ -16,8 +16,11 @@ package prometheus
|
|||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
|
@ -108,8 +111,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
|
|||
}
|
||||
|
||||
// HistogramOpts bundles the options for creating a Histogram metric. It is
|
||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
||||
// optional and can safely be left at their zero value.
|
||||
// mandatory to set Name to a non-empty string. All other fields are optional
|
||||
// and can safely be left at their zero value, although it is strongly
|
||||
// encouraged to set a Help string.
|
||||
type HistogramOpts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Histogram (created by joining these components with
|
||||
|
@ -120,29 +124,22 @@ type HistogramOpts struct {
|
|||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this Histogram. Mandatory!
|
||||
// Help provides information about this Histogram.
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
Help string
|
||||
|
||||
// ConstLabels are used to attach fixed labels to this
|
||||
// Histogram. Histograms with the same fully-qualified name must have the
|
||||
// same label names in their ConstLabels.
|
||||
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
||||
// with the same fully-qualified name must have the same label names in
|
||||
// their ConstLabels.
|
||||
//
|
||||
// Note that in most cases, labels have a value that varies during the
|
||||
// lifetime of a process. Those labels are usually managed with a
|
||||
// HistogramVec. ConstLabels serve only special purposes. One is for the
|
||||
// special case where the value of a label does not change during the
|
||||
// lifetime of a process, e.g. if the revision of the running binary is
|
||||
// put into a label. Another, more advanced purpose is if more than one
|
||||
// Collector needs to collect Histograms with the same fully-qualified
|
||||
// name. In that case, those Summaries must differ in the values of
|
||||
// their ConstLabels. See the Collector examples.
|
||||
//
|
||||
// If the value of a label never changes (not even between binaries),
|
||||
// that label most likely should not be a label at all (but part of the
|
||||
// metric name).
|
||||
// ConstLabels are only used rarely. In particular, do not use them to
|
||||
// attach the same labels to all your metrics. Those use cases are
|
||||
// better covered by target labels set by the scraping Prometheus
|
||||
// server, or by one specific metric (e.g. a build_info or a
|
||||
// machine_role metric). See also
|
||||
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
|
||||
ConstLabels Labels
|
||||
|
||||
// Buckets defines the buckets into which observations are counted. Each
|
||||
|
@ -155,6 +152,10 @@ type HistogramOpts struct {
|
|||
|
||||
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
|
||||
// panics if the buckets in HistogramOpts are not in strictly increasing order.
|
||||
//
|
||||
// The returned implementation also implements ExemplarObserver. It is safe to
|
||||
// perform the corresponding type assertion. Exemplars are tracked separately
|
||||
// for each bucket.
|
||||
func NewHistogram(opts HistogramOpts) Histogram {
|
||||
return newHistogram(
|
||||
NewDesc(
|
||||
|
@ -169,7 +170,7 @@ func NewHistogram(opts HistogramOpts) Histogram {
|
|||
|
||||
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
panic(errInconsistentCardinality)
|
||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
|
||||
}
|
||||
|
||||
for _, n := range desc.variableLabels {
|
||||
|
@ -191,6 +192,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
|||
desc: desc,
|
||||
upperBounds: opts.Buckets,
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
counts: [2]*histogramCounts{{}, {}},
|
||||
now: time.Now,
|
||||
}
|
||||
for i, upperBound := range h.upperBounds {
|
||||
if i < len(h.upperBounds)-1 {
|
||||
|
@ -207,30 +210,60 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
|||
}
|
||||
}
|
||||
}
|
||||
// Finally we know the final length of h.upperBounds and can make counts.
|
||||
h.counts = make([]uint64, len(h.upperBounds))
|
||||
// Finally we know the final length of h.upperBounds and can make buckets
|
||||
// for both counts as well as exemplars:
|
||||
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
|
||||
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
|
||||
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
|
||||
|
||||
h.init(h) // Init self-collection.
|
||||
return h
|
||||
}
|
||||
|
||||
type histogram struct {
|
||||
type histogramCounts struct {
|
||||
// sumBits contains the bits of the float64 representing the sum of all
|
||||
// observations. sumBits and count have to go first in the struct to
|
||||
// guarantee alignment for atomic operations.
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
sumBits uint64
|
||||
count uint64
|
||||
buckets []uint64
|
||||
}
|
||||
|
||||
type histogram struct {
|
||||
// countAndHotIdx enables lock-free writes with use of atomic updates.
|
||||
// The most significant bit is the hot index [0 or 1] of the count field
|
||||
// below. Observe calls update the hot one. All remaining bits count the
|
||||
// number of Observe calls. Observe starts by incrementing this counter,
|
||||
// and finish by incrementing the count field in the respective
|
||||
// histogramCounts, as a marker for completion.
|
||||
//
|
||||
// Calls of the Write method (which are non-mutating reads from the
|
||||
// perspective of the histogram) swap the hot–cold under the writeMtx
|
||||
// lock. A cooldown is awaited (while locked) by comparing the number of
|
||||
// observations with the initiation count. Once they match, then the
|
||||
// last observation on the now cool one has completed. All cool fields must
|
||||
// be merged into the new hot before releasing writeMtx.
|
||||
//
|
||||
// Fields with atomic access first! See alignment constraint:
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
countAndHotIdx uint64
|
||||
|
||||
selfCollector
|
||||
// Note that there is no mutex required.
|
||||
|
||||
desc *Desc
|
||||
writeMtx sync.Mutex // Only used in the Write method.
|
||||
|
||||
// Two counts, one is "hot" for lock-free observations, the other is
|
||||
// "cold" for writing out a dto.Metric. It has to be an array of
|
||||
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
||||
counts [2]*histogramCounts
|
||||
|
||||
upperBounds []float64
|
||||
counts []uint64
|
||||
|
||||
labelPairs []*dto.LabelPair
|
||||
exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
|
||||
|
||||
now func() time.Time // To mock out time.Now() for testing.
|
||||
}
|
||||
|
||||
func (h *histogram) Desc() *Desc {
|
||||
|
@ -238,6 +271,89 @@ func (h *histogram) Desc() *Desc {
|
|||
}
|
||||
|
||||
func (h *histogram) Observe(v float64) {
	h.observe(v, h.findBucket(v))
}

func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
	i := h.findBucket(v)
	h.observe(v, i)
	h.updateExemplar(v, i, e)
}
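Observe is the normal path; ObserveWithExemplar is reached through a type assertion, as the NewHistogram documentation in this diff suggests. A hypothetical sketch (bucket layout, metric name, and the trace ID label are illustrative assumptions):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request latency distribution.",
		Buckets: prometheus.ExponentialBuckets(0.001, 2, 12), // 1ms up to ~2s.
	})
	prometheus.MustRegister(latency)

	// Plain observation.
	latency.Observe(0.217)

	// Attach an exemplar (e.g. a trace ID) to the bucket this value falls into.
	if eo, ok := latency.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.217, prometheus.Labels{"trace_id": "abc123"})
	}
}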
|
||||
|
||||
func (h *histogram) Write(out *dto.Metric) error {
|
||||
// For simplicity, we protect this whole method by a mutex. It is not in
|
||||
// the hot path, i.e. Observe is called much more often than Write. The
|
||||
// complication of making Write lock-free isn't worth it, if possible at
|
||||
// all.
|
||||
h.writeMtx.Lock()
|
||||
defer h.writeMtx.Unlock()
|
||||
|
||||
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
|
||||
// without touching the count bits. See the struct comments for a full
|
||||
// description of the algorithm.
|
||||
n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
|
||||
// count is contained unchanged in the lower 63 bits.
|
||||
count := n & ((1 << 63) - 1)
|
||||
// The most significant bit tells us which counts is hot. The complement
|
||||
// is thus the cold one.
|
||||
hotCounts := h.counts[n>>63]
|
||||
coldCounts := h.counts[(^n)>>63]
|
||||
|
||||
// Await cooldown.
|
||||
for count != atomic.LoadUint64(&coldCounts.count) {
|
||||
runtime.Gosched() // Let observations get work done.
|
||||
}
|
||||
|
||||
his := &dto.Histogram{
|
||||
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
|
||||
SampleCount: proto.Uint64(count),
|
||||
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
|
||||
}
|
||||
var cumCount uint64
|
||||
for i, upperBound := range h.upperBounds {
|
||||
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
|
||||
his.Bucket[i] = &dto.Bucket{
|
||||
CumulativeCount: proto.Uint64(cumCount),
|
||||
UpperBound: proto.Float64(upperBound),
|
||||
}
|
||||
if e := h.exemplars[i].Load(); e != nil {
|
||||
his.Bucket[i].Exemplar = e.(*dto.Exemplar)
|
||||
}
|
||||
}
|
||||
// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
|
||||
if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
|
||||
b := &dto.Bucket{
|
||||
CumulativeCount: proto.Uint64(count),
|
||||
UpperBound: proto.Float64(math.Inf(1)),
|
||||
Exemplar: e.(*dto.Exemplar),
|
||||
}
|
||||
his.Bucket = append(his.Bucket, b)
|
||||
}
|
||||
|
||||
out.Histogram = his
|
||||
out.Label = h.labelPairs
|
||||
|
||||
// Finally add all the cold counts to the new hot counts and reset the cold counts.
|
||||
atomic.AddUint64(&hotCounts.count, count)
|
||||
atomic.StoreUint64(&coldCounts.count, 0)
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
||||
break
|
||||
}
|
||||
}
|
||||
for i := range h.upperBounds {
|
||||
atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
|
||||
atomic.StoreUint64(&coldCounts.buckets[i], 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findBucket returns the index of the bucket for the provided value, or
|
||||
// len(h.upperBounds) for the +Inf bucket.
|
||||
func (h *histogram) findBucket(v float64) int {
|
||||
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
|
||||
// slightly faster than the binary search. If we really care, we could
|
||||
// switch from one search strategy to the other depending on the number
|
||||
|
@ -247,38 +363,43 @@ func (h *histogram) Observe(v float64) {
|
|||
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
|
||||
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
|
||||
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
||||
i := sort.SearchFloat64s(h.upperBounds, v)
|
||||
if i < len(h.counts) {
|
||||
atomic.AddUint64(&h.counts[i], 1)
|
||||
return sort.SearchFloat64s(h.upperBounds, v)
|
||||
}
|
||||
|
||||
// observe is the implementation for Observe without the findBucket part.
|
||||
func (h *histogram) observe(v float64, bucket int) {
|
||||
// We increment h.countAndHotIdx so that the counter in the lower
|
||||
// 63 bits gets incremented. At the same time, we get the new value
|
||||
// back, which we can use to find the currently-hot counts.
|
||||
n := atomic.AddUint64(&h.countAndHotIdx, 1)
|
||||
hotCounts := h.counts[n>>63]
|
||||
|
||||
if bucket < len(h.upperBounds) {
|
||||
atomic.AddUint64(&hotCounts.buckets[bucket], 1)
|
||||
}
|
||||
atomic.AddUint64(&h.count, 1)
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&h.sumBits)
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Increment count last as we take it as a signal that the observation
|
||||
// is complete.
|
||||
atomic.AddUint64(&hotCounts.count, 1)
|
||||
}
|
||||
|
||||
func (h *histogram) Write(out *dto.Metric) error {
|
||||
his := &dto.Histogram{}
|
||||
buckets := make([]*dto.Bucket, len(h.upperBounds))
|
||||
|
||||
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
|
||||
his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
|
||||
var count uint64
|
||||
for i, upperBound := range h.upperBounds {
|
||||
count += atomic.LoadUint64(&h.counts[i])
|
||||
buckets[i] = &dto.Bucket{
|
||||
CumulativeCount: proto.Uint64(count),
|
||||
UpperBound: proto.Float64(upperBound),
|
||||
// updateExemplar replaces the exemplar for the provided bucket. With empty
|
||||
// labels, it's a no-op. It panics if any of the labels is invalid.
|
||||
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
e, err := newExemplar(v, h.now(), l)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
his.Bucket = buckets
|
||||
out.Histogram = his
|
||||
out.Label = h.labelPairs
|
||||
return nil
|
||||
h.exemplars[bucket].Store(e)
|
||||
}
|
||||
|
||||
// HistogramVec is a Collector that bundles a set of Histograms that all share the
|
||||
|
@ -287,12 +408,11 @@ func (h *histogram) Write(out *dto.Metric) error {
|
|||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewHistogramVec.
|
||||
type HistogramVec struct {
|
||||
*MetricVec
|
||||
*metricVec
|
||||
}
|
||||
|
||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
// partitioned by the given label names.
|
||||
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
|
@ -301,47 +421,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &HistogramVec{
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newHistogram(desc, opts, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Histogram and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
// GetMetricWithLabelValues returns the Histogram for the given slice of label
|
||||
// values (same order as the VariableLabels in Desc). If that combination of
|
||||
// label values is accessed for the first time, a new Histogram is created.
|
||||
//
|
||||
// It is possible to call this method without using the returned Histogram to only
|
||||
// create the new Histogram but leave it at its starting value, a Histogram without
|
||||
// any observations.
|
||||
//
|
||||
// Keeping the Histogram for later use is possible (and should be considered if
|
||||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||
// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
|
||||
// Histogram will still exist, but it will not be exported anymore, even if a
|
||||
// Histogram with the same label values is created later. See also the CounterVec
|
||||
// example.
|
||||
//
|
||||
// An error is returned if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
|
||||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Histogram), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Histogram and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
// GetMetricWith returns the Histogram for the given Labels map (the label names
|
||||
// must match those of the VariableLabels in Desc). If that label map is
|
||||
// accessed for the first time, a new Histogram is created. Implications of
|
||||
// creating a Histogram without using it and keeping the Histogram for later use
|
||||
// are the same as for GetMetricWithLabelValues.
|
||||
//
|
||||
// An error is returned if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// This method is used for the same purpose as
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
|
||||
metric, err := v.metricVec.getMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Histogram), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||
// error allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
||||
func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Histogram)
|
||||
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
|
||||
h, err := v.GetMetricWithLabelValues(lvs...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (m *HistogramVec) With(labels Labels) Histogram {
|
||||
return m.MetricVec.With(labels).(Histogram)
|
||||
// With works as GetMetricWith but panics where GetMetricWith would have
|
||||
// returned an error. Not returning an error allows shortcuts like
|
||||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (v *HistogramVec) With(labels Labels) Observer {
|
||||
h, err := v.GetMetricWith(labels)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return h
|
||||
}
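WithLabelValues and With are the everyday entry points for recording into a labeled histogram. A minimal usage sketch (metric name, labels, and values are invented for illustration):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqDur := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request duration partitioned by status code and method.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(reqDur)

	// Shortcut by label values; the order must match the label names above.
	reqDur.WithLabelValues("404", "GET").Observe(0.042)

	// Equivalent shortcut via a Labels map.
	reqDur.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(0.042)
}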
|
||||
|
||||
// CurryWith returns a vector curried with the provided labels, i.e. the
|
||||
// returned vector has those labels pre-set for all labeled operations performed
|
||||
// on it. The cardinality of the curried vector is reduced accordingly. The
|
||||
// order of the remaining labels stays the same (just with the curried labels
|
||||
// taken out of the sequence – which is relevant for the
|
||||
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
|
||||
// vector, but only with labels not yet used for currying before.
|
||||
//
|
||||
// The metrics contained in the HistogramVec are shared between the curried and
|
||||
// uncurried vectors. They are just accessed differently. Curried and uncurried
|
||||
// vectors behave identically in terms of collection. Only one must be
|
||||
// registered with a given registry (usually the uncurried version). The Reset
|
||||
// method deletes all metrics, even if called on a curried vector.
|
||||
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
|
||||
vec, err := v.curryWith(labels)
|
||||
if vec != nil {
|
||||
return &HistogramVec{vec}, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// MustCurryWith works as CurryWith but panics where CurryWith would have
|
||||
// returned an error.
|
||||
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
|
||||
vec, err := v.CurryWith(labels)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return vec
|
||||
}
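Currying pays off when one label is fixed for an entire code path, for example deriving a per-handler view from a shared vector. A hedged sketch (handler and metric names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "handler_latency_seconds",
			Help: "Latency partitioned by handler and method.",
		},
		[]string{"handler", "method"},
	)
	prometheus.MustRegister(latency)

	// Fix the "handler" label once; only "method" remains variable.
	loginLatency := latency.MustCurryWith(prometheus.Labels{"handler": "login"})
	loginLatency.WithLabelValues("POST").Observe(0.030)
}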
|
||||
|
||||
type constHistogram struct {
|
||||
|
@ -393,7 +582,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
|
|||
// bucket.
|
||||
//
|
||||
// NewConstHistogram returns an error if the length of labelValues is not
|
||||
// consistent with the variable labels in Desc.
|
||||
// consistent with the variable labels in Desc or if Desc is invalid.
|
||||
func NewConstHistogram(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
|
@ -401,8 +590,11 @@ func NewConstHistogram(
|
|||
buckets map[float64]uint64,
|
||||
labelValues ...string,
|
||||
) (Metric, error) {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
return nil, errInconsistentCardinality
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &constHistogram{
|
||||
desc: desc,
|
||||
|
|
|
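NewConstHistogram, documented above, is typically called from a custom Collector to expose histogram data that was aggregated elsewhere. A hedged sketch with invented numbers (the metric name and bucket counts are for illustration only):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"task_duration_seconds",
		"Duration of tasks, mirrored from an external system.",
		nil, nil,
	)

	// Cumulative bucket counts keyed by upper bound; count and sum cover all observations.
	h, err := prometheus.NewConstHistogram(
		desc,
		42,    // total observation count
		123.4, // sum of all observed values
		map[float64]uint64{0.5: 10, 1: 30, 5: 42},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Desc())
}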
@ -1,490 +0,0 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
)
|
||||
|
||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
||||
// related should live. The functions here are just for avoiding
|
||||
// breakage. Everything is deprecated.
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentLengthHeader = "Content-Length"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
)
|
||||
|
||||
var bufPool sync.Pool
|
||||
|
||||
func getBuf() *bytes.Buffer {
|
||||
buf := bufPool.Get()
|
||||
if buf == nil {
|
||||
return &bytes.Buffer{}
|
||||
}
|
||||
return buf.(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func giveBuf(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
}
|
||||
|
||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||
// name).
|
||||
//
|
||||
// Deprecated: Please note the issues described in the doc comment of
|
||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
||||
// (which is not instrumented).
|
||||
func Handler() http.Handler {
|
||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
||||
}
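The deprecation notices in this removed file all point towards promhttp. A minimal sketch of the suggested replacement for the handler above:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serve the default registry's metrics without the deprecated instrumentation.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}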
|
||||
|
||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||
//
|
||||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
||||
func UninstrumentedHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
mfs, err := DefaultGatherer.Gather()
|
||||
if err != nil {
|
||||
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
contentType := expfmt.Negotiate(req.Header)
|
||||
buf := getBuf()
|
||||
defer giveBuf(buf)
|
||||
writer, encoding := decorateWriter(req, buf)
|
||||
enc := expfmt.NewEncoder(writer, contentType)
|
||||
var lastErr error
|
||||
for _, mf := range mfs {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
lastErr = err
|
||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if lastErr != nil && buf.Len() == 0 {
|
||||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
header := w.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||
if encoding != "" {
|
||||
header.Set(contentEncodingHeader, encoding)
|
||||
}
|
||||
w.Write(buf.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||
// (which is empty if no compression is enabled).
|
||||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
||||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
}
|
||||
return writer, ""
|
||||
}
|
||||
|
||||
var instLabels = []string{"method", "code"}
|
||||
|
||||
type nower interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
type nowFunc func() time.Time
|
||||
|
||||
func (n nowFunc) Now() time.Time {
|
||||
return n()
|
||||
}
|
||||
|
||||
var now nower = nowFunc(func() time.Time {
|
||||
return time.Now()
|
||||
})
|
||||
|
||||
func nowSeries(t ...time.Time) nower {
|
||||
return nowFunc(func() time.Time {
|
||||
defer func() {
|
||||
t = t[1:]
|
||||
}()
|
||||
|
||||
return t[0]
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
||||
// registers four metric collectors (if not already done) and reports HTTP
|
||||
// metrics to the (newly or already) registered collectors: http_requests_total
|
||||
// (CounterVec), http_request_duration_microseconds (Summary),
|
||||
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
|
||||
// has a constant label named "handler" with the provided handlerName as
|
||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||
// (label name "method") and HTTP status code (label name "code").
|
||||
//
|
||||
// Deprecated: InstrumentHandler has several issues:
|
||||
//
|
||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
||||
// aggregation across multiple instances is required.
|
||||
//
|
||||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
||||
// seconds.
|
||||
//
|
||||
// - The size of the request is calculated in a separate goroutine. Since this
|
||||
// calculator requires access to the request header, it creates a race with
|
||||
// any writes to the header performed during request handling.
|
||||
// httputil.ReverseProxy is a prominent example of a handler
|
||||
// performing such writes.
|
||||
//
|
||||
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
||||
// handlers that are more flexible and have fewer issues. Please prefer direct
|
||||
// instrumentation in the meantime.
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
||||
// issues).
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(
|
||||
SummaryOpts{
|
||||
Subsystem: "http",
|
||||
ConstLabels: Labels{"handler": handlerName},
|
||||
},
|
||||
handlerFunc,
|
||||
)
|
||||
}
|
||||
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
||||
// issues) but provides more flexibility (at the cost of a more complex call
|
||||
// syntax). As InstrumentHandler, this function registers four metric
|
||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// help string. The names of the variable labels of the http_requests_total
|
||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
||||
//
|
||||
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
|
||||
// behavior of InstrumentHandler:
|
||||
//
|
||||
// prometheus.InstrumentHandlerWithOpts(
|
||||
// prometheus.SummaryOpts{
|
||||
// Subsystem: "http",
|
||||
// ConstLabels: prometheus.Labels{"handler": handlerName},
|
||||
// },
|
||||
// handler,
|
||||
// )
|
||||
//
|
||||
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
|
||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||
// and all its fields are set to the equally named fields in the provided
|
||||
// SummaryOpts.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
||||
// SummaryOpts are used.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
||||
// as InstrumentHandler is.
|
||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
reqCnt := NewCounterVec(
|
||||
CounterOpts{
|
||||
Namespace: opts.Namespace,
|
||||
Subsystem: opts.Subsystem,
|
||||
Name: "requests_total",
|
||||
Help: "Total number of HTTP requests made.",
|
||||
ConstLabels: opts.ConstLabels,
|
||||
},
|
||||
instLabels,
|
||||
)
|
||||
|
||||
opts.Name = "request_duration_microseconds"
|
||||
opts.Help = "The HTTP request latencies in microseconds."
|
||||
reqDur := NewSummary(opts)
|
||||
|
||||
opts.Name = "request_size_bytes"
|
||||
opts.Help = "The HTTP request sizes in bytes."
|
||||
reqSz := NewSummary(opts)
|
||||
|
||||
opts.Name = "response_size_bytes"
|
||||
opts.Help = "The HTTP response sizes in bytes."
|
||||
resSz := NewSummary(opts)
|
||||
|
||||
regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
|
||||
regReqDur := MustRegisterOrGet(reqDur).(Summary)
|
||||
regReqSz := MustRegisterOrGet(reqSz).(Summary)
|
||||
regResSz := MustRegisterOrGet(resSz).(Summary)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
|
||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
||||
out := make(chan int)
|
||||
urlLen := 0
|
||||
if r.URL != nil {
|
||||
urlLen = len(r.URL.String())
|
||||
}
|
||||
go computeApproximateRequestSize(r, out, urlLen)
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
_, hj := w.(http.Hijacker)
|
||||
_, rf := w.(io.ReaderFrom)
|
||||
var rw http.ResponseWriter
|
||||
if cn && fl && hj && rf {
|
||||
rw = &fancyResponseWriterDelegator{delegate}
|
||||
} else {
|
||||
rw = delegate
|
||||
}
|
||||
handlerFunc(rw, r)
|
||||
|
||||
elapsed := float64(time.Since(now)) / float64(time.Microsecond)
|
||||
|
||||
method := sanitizeMethod(r.Method)
|
||||
code := sanitizeCode(delegate.status)
|
||||
regReqCnt.WithLabelValues(method, code).Inc()
|
||||
regReqDur.Observe(elapsed)
|
||||
regResSz.Observe(float64(delegate.written))
|
||||
regReqSz.Observe(float64(<-out))
|
||||
})
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
out <- s
|
||||
}
|
||||
|
||||
type responseWriterDelegator struct {
|
||||
http.ResponseWriter
|
||||
|
||||
handler, method string
|
||||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := r.ResponseWriter.Write(b)
|
||||
r.written += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
type fancyResponseWriterDelegator struct {
|
||||
*responseWriterDelegator
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
|
||||
return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) Flush() {
|
||||
f.ResponseWriter.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return f.ResponseWriter.(http.Hijacker).Hijack()
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
|
||||
if !f.wroteHeader {
|
||||
f.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
|
||||
f.written += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
func sanitizeMethod(m string) string {
|
||||
switch m {
|
||||
case "GET", "get":
|
||||
return "get"
|
||||
case "PUT", "put":
|
||||
return "put"
|
||||
case "HEAD", "head":
|
||||
return "head"
|
||||
case "POST", "post":
|
||||
return "post"
|
||||
case "DELETE", "delete":
|
||||
return "delete"
|
||||
case "CONNECT", "connect":
|
||||
return "connect"
|
||||
case "OPTIONS", "options":
|
||||
return "options"
|
||||
case "NOTIFY", "notify":
|
||||
return "notify"
|
||||
default:
|
||||
return strings.ToLower(m)
|
||||
}
|
||||
}
|
||||
|
||||
func sanitizeCode(s int) string {
|
||||
switch s {
|
||||
case 100:
|
||||
return "100"
|
||||
case 101:
|
||||
return "101"
|
||||
|
||||
case 200:
|
||||
return "200"
|
||||
case 201:
|
||||
return "201"
|
||||
case 202:
|
||||
return "202"
|
||||
case 203:
|
||||
return "203"
|
||||
case 204:
|
||||
return "204"
|
||||
case 205:
|
||||
return "205"
|
||||
case 206:
|
||||
return "206"
|
||||
|
||||
case 300:
|
||||
return "300"
|
||||
case 301:
|
||||
return "301"
|
||||
case 302:
|
||||
return "302"
|
||||
case 304:
|
||||
return "304"
|
||||
case 305:
|
||||
return "305"
|
||||
case 307:
|
||||
return "307"
|
||||
|
||||
case 400:
|
||||
return "400"
|
||||
case 401:
|
||||
return "401"
|
||||
case 402:
|
||||
return "402"
|
||||
case 403:
|
||||
return "403"
|
||||
case 404:
|
||||
return "404"
|
||||
case 405:
|
||||
return "405"
|
||||
case 406:
|
||||
return "406"
|
||||
case 407:
|
||||
return "407"
|
||||
case 408:
|
||||
return "408"
|
||||
case 409:
|
||||
return "409"
|
||||
case 410:
|
||||
return "410"
|
||||
case 411:
|
||||
return "411"
|
||||
case 412:
|
||||
return "412"
|
||||
case 413:
|
||||
return "413"
|
||||
case 414:
|
||||
return "414"
|
||||
case 415:
|
||||
return "415"
|
||||
case 416:
|
||||
return "416"
|
||||
case 417:
|
||||
return "417"
|
||||
case 418:
|
||||
return "418"
|
||||
|
||||
case 500:
|
||||
return "500"
|
||||
case 501:
|
||||
return "501"
|
||||
case 502:
|
||||
return "502"
|
||||
case 503:
|
||||
return "503"
|
||||
case 504:
|
||||
return "504"
|
||||
case 505:
|
||||
return "505"
|
||||
|
||||
case 428:
|
||||
return "428"
|
||||
case 429:
|
||||
return "429"
|
||||
case 431:
|
||||
return "431"
|
||||
case 511:
|
||||
return "511"
|
||||
|
||||
default:
|
||||
return strconv.Itoa(s)
|
||||
}
|
||||
}
|
vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go (generated, vendored, new file, 85 lines)
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// metricSorter is a sortable slice of *dto.Metric.
|
||||
type metricSorter []*dto.Metric
|
||||
|
||||
func (s metricSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s metricSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s metricSorter) Less(i, j int) bool {
|
||||
if len(s[i].Label) != len(s[j].Label) {
|
||||
// This should not happen. The metrics are
|
||||
// inconsistent. However, we have to deal with it, as
|
||||
// people might use custom collectors or metric family injection
|
||||
// to create inconsistent metrics. So let's simply compare the
|
||||
// number of labels in this case. That will still yield
|
||||
// reproducible sorting.
|
||||
return len(s[i].Label) < len(s[j].Label)
|
||||
}
|
||||
for n, lp := range s[i].Label {
|
||||
vi := lp.GetValue()
|
||||
vj := s[j].Label[n].GetValue()
|
||||
if vi != vj {
|
||||
return vi < vj
|
||||
}
|
||||
}
|
||||
|
||||
// We should never arrive here. Multiple metrics with the same
|
||||
// label set in the same scrape will lead to undefined ingestion
|
||||
// behavior. However, as above, we have to provide stable sorting
|
||||
// here, even for inconsistent metrics. So sort equal metrics
|
||||
// by their timestamp, with missing timestamps (implying "now")
|
||||
// coming last.
|
||||
if s[i].TimestampMs == nil {
|
||||
return false
|
||||
}
|
||||
if s[j].TimestampMs == nil {
|
||||
return true
|
||||
}
|
||||
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
||||
}
|
||||
|
||||
// NormalizeMetricFamilies returns a MetricFamily slice with empty
|
||||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
||||
// the slice, with the contained Metrics sorted within each MetricFamily.
|
||||
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
||||
for _, mf := range metricFamiliesByName {
|
||||
sort.Sort(metricSorter(mf.Metric))
|
||||
}
|
||||
names := make([]string, 0, len(metricFamiliesByName))
|
||||
for name, mf := range metricFamiliesByName {
|
||||
if len(mf.Metric) > 0 {
|
||||
names = append(names, name)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
result := make([]*dto.MetricFamily, 0, len(names))
|
||||
for _, name := range names {
|
||||
result = append(result, metricFamiliesByName[name])
|
||||
}
|
||||
return result
|
||||
}
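NormalizeMetricFamilies prunes families without metrics and returns the rest sorted by name; since it lives under internal/ it is only callable from within client_golang itself. A test-style sketch of the behavior, written as if it were placed in the internal package:

package internal

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func ExampleNormalizeMetricFamilies() {
	byName := map[string]*dto.MetricFamily{
		"b_total": {Name: proto.String("b_total"), Metric: []*dto.Metric{{}}},
		"a_total": {Name: proto.String("a_total"), Metric: []*dto.Metric{{}}},
		"empty":   {Name: proto.String("empty")}, // pruned: it contains no metrics
	}
	for _, mf := range NormalizeMetricFamilies(byName) {
		fmt.Println(mf.GetName())
	}
	// Output:
	// a_total
	// b_total
}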
|
|
@ -0,0 +1,87 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// Labels represents a collection of label name -> value mappings. This type is
|
||||
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
|
||||
// metric vector Collectors, e.g.:
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
//
|
||||
// The other use-case is the specification of constant label pairs in Opts or to
|
||||
// create a Desc.
|
||||
type Labels map[string]string
|
||||
|
||||
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
||||
// label names.
|
||||
const reservedLabelPrefix = "__"
|
||||
|
||||
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|
||||
|
||||
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
|
||||
return fmt.Errorf(
|
||||
"%s: %q has %d variable labels named %q but %d values %q were provided",
|
||||
errInconsistentCardinality, fqName,
|
||||
len(labels), labels,
|
||||
len(labelValues), labelValues,
|
||||
)
|
||||
}
|
||||
|
||||
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
|
||||
if len(labels) != expectedNumberOfValues {
|
||||
return fmt.Errorf(
|
||||
"%s: expected %d label values but got %d in %#v",
|
||||
errInconsistentCardinality, expectedNumberOfValues,
|
||||
len(labels), labels,
|
||||
)
|
||||
}
|
||||
|
||||
for name, val := range labels {
|
||||
if !utf8.ValidString(val) {
|
||||
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
|
||||
if len(vals) != expectedNumberOfValues {
|
||||
return fmt.Errorf(
|
||||
"%s: expected %d label values but got %d in %#v",
|
||||
errInconsistentCardinality, expectedNumberOfValues,
|
||||
len(vals), vals,
|
||||
)
|
||||
}
|
||||
|
||||
for _, val := range vals {
|
||||
if !utf8.ValidString(val) {
|
||||
return fmt.Errorf("label value %q is not valid UTF-8", val)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkLabelName(l string) bool {
|
||||
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
|
||||
}
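These helpers are unexported and are consulted whenever label values reach a metric vector: the number of values must match the number of variable labels, every value must be valid UTF-8, and names starting with the reserved "__" prefix are rejected. A test-style sketch of that behavior, as if placed inside the prometheus package:

package prometheus

import "testing"

func TestLabelValidationSketch(t *testing.T) {
	// Two values for two expected labels: valid.
	if err := validateLabelValues([]string{"404", "GET"}, 2); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Cardinality mismatch: one value where two are expected.
	if err := validateLabelValues([]string{"404"}, 2); err == nil {
		t.Error("expected an inconsistent cardinality error")
	}
	// Label names with the reserved "__" prefix are rejected.
	if checkLabelName("__internal") {
		t.Error("expected the reserved label name to be rejected")
	}
}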
|
|
@ -15,11 +15,15 @@ package prometheus
|
|||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
const separatorByte byte = 255
|
||||
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
|
||||
|
||||
// A Metric models a single sample value with its meta data being exported to
|
||||
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
|
||||
|
@ -43,9 +47,8 @@ type Metric interface {
|
|||
// While populating dto.Metric, it is the responsibility of the
|
||||
// implementation to ensure validity of the Metric protobuf (like valid
|
||||
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||
// recommended to sort labels lexicographically. (Implementers may find
|
||||
// LabelPairSorter useful for that.) Callers of Write should still make
|
||||
// sure of sorting if they depend on it.
|
||||
// recommended to sort labels lexicographically. Callers of Write should
|
||||
// still make sure of sorting if they depend on it.
|
||||
Write(*dto.Metric) error
|
||||
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||
// dto.Metric protobuf to save allocations has disappeared. The
|
||||
|
@ -57,8 +60,9 @@ type Metric interface {
|
|||
// implementation XXX has its own XXXOpts type, but in most cases, it is just
|
||||
// an alias of this type (which might change when the requirement arises.)
|
||||
//
|
||||
// It is mandatory to set Name and Help to a non-empty string. All other fields
|
||||
// are optional and can safely be left at their zero value.
|
||||
// It is mandatory to set Name to a non-empty string. All other fields are
|
||||
// optional and can safely be left at their zero value, although it is strongly
|
||||
// encouraged to set a Help string.
|
||||
type Opts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Metric (created by joining these components with
|
||||
|
@ -69,7 +73,7 @@ type Opts struct {
|
|||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this metric. Mandatory!
|
||||
// Help provides information about this metric.
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
|
@ -79,20 +83,12 @@ type Opts struct {
|
|||
// with the same fully-qualified name must have the same label names in
|
||||
// their ConstLabels.
|
||||
//
|
||||
// Note that in most cases, labels have a value that varies during the
|
||||
// lifetime of a process. Those labels are usually managed with a metric
|
||||
// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
|
||||
// serve only special purposes. One is for the special case where the
|
||||
// value of a label does not change during the lifetime of a process,
|
||||
// e.g. if the revision of the running binary is put into a
|
||||
// label. Another, more advanced purpose is if more than one Collector
|
||||
// needs to collect Metrics with the same fully-qualified name. In that
|
||||
// case, those Metrics must differ in the values of their
|
||||
// ConstLabels. See the Collector examples.
|
||||
//
|
||||
// If the value of a label never changes (not even between binaries),
|
||||
// that label most likely should not be a label at all (but part of the
|
||||
// metric name).
|
||||
// ConstLabels are only used rarely. In particular, do not use them to
|
||||
// attach the same labels to all your metrics. Those use cases are
|
||||
// better covered by target labels set by the scraping Prometheus
|
||||
// server, or by one specific metric (e.g. a build_info or a
|
||||
// machine_role metric). See also
|
||||
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
|
||||
ConstLabels Labels
|
||||
}
|
||||
|
||||
|
@ -118,37 +114,22 @@ func BuildFQName(namespace, subsystem, name string) string {
|
|||
return name
|
||||
}
|
||||
|
||||
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
|
||||
// dto.LabelPair pointers. This is useful for implementing the Write method of
|
||||
// custom metrics.
|
||||
type LabelPairSorter []*dto.LabelPair
|
||||
// labelPairSorter implements sort.Interface. It is used to sort a slice of
|
||||
// dto.LabelPair pointers.
|
||||
type labelPairSorter []*dto.LabelPair
|
||||
|
||||
func (s LabelPairSorter) Len() int {
|
||||
func (s labelPairSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s LabelPairSorter) Swap(i, j int) {
|
||||
func (s labelPairSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s LabelPairSorter) Less(i, j int) bool {
|
||||
func (s labelPairSorter) Less(i, j int) bool {
|
||||
return s[i].GetName() < s[j].GetName()
|
||||
}
|
||||
|
||||
type hashSorter []uint64
|
||||
|
||||
func (s hashSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s hashSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s hashSorter) Less(i, j int) bool {
|
||||
return s[i] < s[j]
|
||||
}
|
||||
|
||||
type invalidMetric struct {
|
||||
desc *Desc
|
||||
err error
|
||||
|
@ -164,3 +145,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
|
|||
func (m *invalidMetric) Desc() *Desc { return m.desc }
|
||||
|
||||
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|
||||
|
||||
type timestampedMetric struct {
|
||||
Metric
|
||||
t time.Time
|
||||
}
|
||||
|
||||
func (m timestampedMetric) Write(pb *dto.Metric) error {
|
||||
e := m.Metric.Write(pb)
|
||||
pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
|
||||
return e
|
||||
}
|
||||
|
||||
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
|
||||
// way that it has an explicit timestamp set to the provided Time. This is only
|
||||
// useful in rare cases as the timestamp of a Prometheus metric should usually
|
||||
// be set by the Prometheus server during scraping. Exceptions include mirroring
|
||||
// metrics with given timestamps from other metric
|
||||
// sources.
|
||||
//
|
||||
// NewMetricWithTimestamp works best with MustNewConstMetric,
|
||||
// MustNewConstHistogram, and MustNewConstSummary, see example.
|
||||
//
|
||||
// Currently, the exposition formats used by Prometheus are limited to
|
||||
// millisecond resolution. Thus, the provided time will be rounded down to the
|
||||
// next full millisecond value.
|
||||
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
|
||||
return timestampedMetric{Metric: m, t: t}
|
||||
}
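As the comment above stresses, explicit timestamps are the exception; the usual pairing is with a const metric that mirrors data from another system. A hedged sketch (metric name, value, and timestamp are invented):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"mirrored_events_total",
		"Events mirrored from an external system, carrying its timestamp.",
		nil, nil,
	)

	// Value and timestamp would normally come from the external source.
	m := prometheus.NewMetricWithTimestamp(
		time.Unix(1600000000, 0),
		prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 123),
	)
	_ = m // in real use, sent from a custom Collector's Collect method
}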
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Observer is the interface that wraps the Observe method, which is used by
|
||||
// Histogram and Summary to add observations.
|
||||
type Observer interface {
|
||||
Observe(float64)
|
||||
}
|
||||
|
||||
// The ObserverFunc type is an adapter to allow the use of ordinary
|
||||
// functions as Observers. If f is a function with the appropriate
|
||||
// signature, ObserverFunc(f) is an Observer that calls f.
|
||||
//
|
||||
// This adapter is usually used in connection with the Timer type, and there are
|
||||
// two general use cases:
|
||||
//
|
||||
// The most common one is to use a Gauge as the Observer for a Timer.
|
||||
// See the "Gauge" Timer example.
|
||||
//
|
||||
// The more advanced use case is to create a function that dynamically decides
|
||||
// which Observer to use for observing the duration. See the "Complex" Timer
|
||||
// example.
|
||||
type ObserverFunc func(float64)
|
||||
|
||||
// Observe calls f(value). It implements Observer.
|
||||
func (f ObserverFunc) Observe(value float64) {
|
||||
f(value)
|
||||
}
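The Gauge use case mentioned above works by adapting the gauge's Set method into an Observer and handing it to a Timer. A minimal sketch (the gauge name and the simulated work are illustrative):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	lastRunDuration := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_last_run_duration_seconds",
		Help: "Duration of the last batch run.",
	})
	prometheus.MustRegister(lastRunDuration)

	// ObserverFunc adapts Gauge.Set so the Timer can "observe" into the gauge.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRunDuration.Set))
	defer timer.ObserveDuration()

	time.Sleep(50 * time.Millisecond) // stand-in for the actual work
}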
|
||||
|
||||
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
|
||||
type ObserverVec interface {
|
||||
GetMetricWith(Labels) (Observer, error)
|
||||
GetMetricWithLabelValues(lvs ...string) (Observer, error)
|
||||
With(Labels) Observer
|
||||
WithLabelValues(...string) Observer
|
||||
CurryWith(Labels) (ObserverVec, error)
|
||||
MustCurryWith(Labels) ObserverVec
|
||||
|
||||
Collector
|
||||
}
|
||||
|
||||
// ExemplarObserver is implemented by Observers that offer the option of
|
||||
// observing a value together with an exemplar. Its ObserveWithExemplar method
|
||||
// works like the Observe method of an Observer but also replaces the currently
|
||||
// saved exemplar (if any) with a new one, created from the provided value, the
|
||||
// current time as timestamp, and the provided Labels. Empty Labels will lead to
|
||||
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
|
||||
// left in place. ObserveWithExemplar panics if any of the provided labels are
|
||||
// invalid or if the provided labels contain more than 64 runes in total.
|
||||
type ExemplarObserver interface {
|
||||
ObserveWithExemplar(value float64, exemplar Labels)
|
||||
}
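In this version of the library the histogram implementation satisfies ExemplarObserver, so an exemplar such as a trace ID can be attached via a type assertion. A hedged sketch (the label and value are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	dur := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request duration with exemplars.",
		Buckets: prometheus.DefBuckets,
	})
	prometheus.MustRegister(dur)

	// Attach an exemplar if the underlying implementation supports it.
	if eo, ok := dur.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.042, prometheus.Labels{"trace_id": "abc123"})
	} else {
		dur.Observe(0.042)
	}
}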
|
|
@ -13,89 +13,126 @@
|
|||
|
||||
package prometheus
|
||||
|
||||
import "github.com/prometheus/procfs"
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
type processCollector struct {
|
||||
pid int
|
||||
collectFn func(chan<- Metric)
|
||||
pidFn func() (int, error)
|
||||
cpuTotal Counter
|
||||
openFDs, maxFDs Gauge
|
||||
vsize, rss Gauge
|
||||
startTime Gauge
|
||||
reportErrors bool
|
||||
cpuTotal *Desc
|
||||
openFDs, maxFDs *Desc
|
||||
vsize, maxVsize *Desc
|
||||
rss *Desc
|
||||
startTime *Desc
|
||||
}
|
||||
|
||||
// ProcessCollectorOpts defines the behavior of a process metrics collector
|
||||
// created with NewProcessCollector.
|
||||
type ProcessCollectorOpts struct {
|
||||
// PidFn returns the PID of the process the collector collects metrics
|
||||
// for. It is called upon each collection. By default, the PID of the
|
||||
// current process is used, as determined on construction time by
|
||||
// calling os.Getpid().
|
||||
PidFn func() (int, error)
|
||||
// If non-empty, each of the collected metrics is prefixed by the
|
||||
// provided string and an underscore ("_").
|
||||
Namespace string
|
||||
// If true, any error encountered during collection is reported as an
|
||||
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
|
||||
// and the collected metrics will be incomplete. (Possibly, no metrics
|
||||
// will be collected at all.) While that's usually not desired, it is
|
||||
// appropriate for the common "mix-in" of process metrics, where process
|
||||
// metrics are nice to have, but failing to collect them should not
|
||||
// disrupt the collection of the remaining metrics.
|
||||
ReportErrors bool
|
||||
}
|
||||
|
||||
// NewProcessCollector returns a collector which exports the current state of
|
||||
// process metrics including cpu, memory and file descriptor usage as well as
|
||||
// the process start time for the given process id under the given namespace.
|
||||
func NewProcessCollector(pid int, namespace string) Collector {
|
||||
return NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return pid, nil },
|
||||
namespace,
|
||||
)
|
||||
// process metrics including CPU, memory and file descriptor usage as well as
|
||||
// the process start time. The detailed behavior is defined by the provided
|
||||
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
|
||||
// collector for the current process with an empty namespace string and no error
|
||||
// reporting.
|
||||
//
|
||||
// The collector only works on operating systems with a Linux-style proc
|
||||
// filesystem and on Microsoft Windows. On other operating systems, it will not
|
||||
// collect any metrics.
|
||||
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
||||
ns := ""
|
||||
if len(opts.Namespace) > 0 {
|
||||
ns = opts.Namespace + "_"
|
||||
}
|
||||
|
||||
// NewProcessCollectorPIDFn returns a collector which exports the current state
|
||||
// of process metrics including cpu, memory and file descriptor usage as well
|
||||
// as the process start time under the given namespace. The given pidFn is
|
||||
// called on each collect and is used to determine the process to export
|
||||
// metrics for.
|
||||
func NewProcessCollectorPIDFn(
|
||||
pidFn func() (int, error),
|
||||
namespace string,
|
||||
) Collector {
|
||||
c := processCollector{
|
||||
pidFn: pidFn,
|
||||
collectFn: func(chan<- Metric) {},
|
||||
c := &processCollector{
|
||||
reportErrors: opts.ReportErrors,
|
||||
cpuTotal: NewDesc(
|
||||
ns+"process_cpu_seconds_total",
|
||||
"Total user and system CPU time spent in seconds.",
|
||||
nil, nil,
|
||||
),
|
||||
openFDs: NewDesc(
|
||||
ns+"process_open_fds",
|
||||
"Number of open file descriptors.",
|
||||
nil, nil,
|
||||
),
|
||||
maxFDs: NewDesc(
|
||||
ns+"process_max_fds",
|
||||
"Maximum number of open file descriptors.",
|
||||
nil, nil,
|
||||
),
|
||||
vsize: NewDesc(
|
||||
ns+"process_virtual_memory_bytes",
|
||||
"Virtual memory size in bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
maxVsize: NewDesc(
|
||||
ns+"process_virtual_memory_max_bytes",
|
||||
"Maximum amount of virtual memory available in bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
rss: NewDesc(
|
||||
ns+"process_resident_memory_bytes",
|
||||
"Resident memory size in bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
startTime: NewDesc(
|
||||
ns+"process_start_time_seconds",
|
||||
"Start time of the process since unix epoch in seconds.",
|
||||
nil, nil,
|
||||
),
|
||||
}
|
||||
|
||||
cpuTotal: NewCounter(CounterOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_cpu_seconds_total",
|
||||
Help: "Total user and system CPU time spent in seconds.",
|
||||
}),
|
||||
openFDs: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_open_fds",
|
||||
Help: "Number of open file descriptors.",
|
||||
}),
|
||||
maxFDs: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_max_fds",
|
||||
Help: "Maximum number of open file descriptors.",
|
||||
}),
|
||||
vsize: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_virtual_memory_bytes",
|
||||
Help: "Virtual memory size in bytes.",
|
||||
}),
|
||||
rss: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_resident_memory_bytes",
|
||||
Help: "Resident memory size in bytes.",
|
||||
}),
|
||||
startTime: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_start_time_seconds",
|
||||
Help: "Start time of the process since unix epoch in seconds.",
|
||||
}),
|
||||
if opts.PidFn == nil {
|
||||
pid := os.Getpid()
|
||||
c.pidFn = func() (int, error) { return pid, nil }
|
||||
} else {
|
||||
c.pidFn = opts.PidFn
|
||||
}
|
||||
|
||||
// Set up process metric collection if supported by the runtime.
|
||||
if _, err := procfs.NewStat(); err == nil {
|
||||
if canCollectProcess() {
|
||||
c.collectFn = c.processCollect
|
||||
} else {
|
||||
c.collectFn = func(ch chan<- Metric) {
|
||||
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
|
||||
}
|
||||
}
|
||||
|
||||
return &c
|
||||
return c
|
||||
}
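With the options struct, wiring the process collector into a registry looks roughly like this; the namespace and error-reporting choices are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		Namespace:    "myapp",
		ReportErrors: true, // surface collection problems as invalid metrics
	}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}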
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *processCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal.Desc()
|
||||
ch <- c.openFDs.Desc()
|
||||
ch <- c.maxFDs.Desc()
|
||||
ch <- c.vsize.Desc()
|
||||
ch <- c.rss.Desc()
|
||||
ch <- c.startTime.Desc()
|
||||
ch <- c.cpuTotal
|
||||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.vsize
|
||||
ch <- c.maxVsize
|
||||
ch <- c.rss
|
||||
ch <- c.startTime
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
|
@ -103,40 +140,12 @@ func (c *processCollector) Collect(ch chan<- Metric) {
|
|||
c.collectFn(ch)
|
||||
}
|
||||
|
||||
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
|
||||
// client allows users to configure the error behavior.
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
pid, err := c.pidFn()
|
||||
if err != nil {
|
||||
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
||||
if !c.reportErrors {
|
||||
return
|
||||
}
|
||||
|
||||
p, err := procfs.NewProc(pid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if stat, err := p.NewStat(); err == nil {
|
||||
c.cpuTotal.Set(stat.CPUTime())
|
||||
ch <- c.cpuTotal
|
||||
c.vsize.Set(float64(stat.VirtualMemory()))
|
||||
ch <- c.vsize
|
||||
c.rss.Set(float64(stat.ResidentMemory()))
|
||||
ch <- c.rss
|
||||
|
||||
if startTime, err := stat.StartTime(); err == nil {
|
||||
c.startTime.Set(startTime)
|
||||
ch <- c.startTime
|
||||
}
|
||||
}
|
||||
|
||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
||||
c.openFDs.Set(float64(fds))
|
||||
ch <- c.openFDs
|
||||
}
|
||||
|
||||
if limits, err := p.NewLimits(); err == nil {
|
||||
c.maxFDs.Set(float64(limits.OpenFiles))
|
||||
ch <- c.maxFDs
|
||||
if desc == nil {
|
||||
desc = NewInvalidDesc(err)
|
||||
}
|
||||
ch <- NewInvalidMetric(desc, err)
|
||||
}
|
||||
|
|
vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go (generated, vendored, new file, 65 lines)
|
@ -0,0 +1,65 @@
|
|||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"github.com/prometheus/procfs"
|
||||
)
|
||||
|
||||
func canCollectProcess() bool {
|
||||
_, err := procfs.NewDefaultFS()
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
pid, err := c.pidFn()
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
p, err := procfs.NewProc(pid)
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
if stat, err := p.Stat(); err == nil {
|
||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
|
||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
|
||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
||||
if startTime, err := stat.StartTime(); err == nil {
|
||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
||||
} else {
|
||||
c.reportError(ch, c.startTime, err)
|
||||
}
|
||||
} else {
|
||||
c.reportError(ch, nil, err)
|
||||
}
|
||||
|
||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
||||
} else {
|
||||
c.reportError(ch, c.openFDs, err)
|
||||
}
|
||||
|
||||
if limits, err := p.Limits(); err == nil {
|
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
||||
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
|
||||
} else {
|
||||
c.reportError(ch, nil, err)
|
||||
}
|
||||
}
|
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go (generated, vendored, new file, 116 lines)
|
@ -0,0 +1,116 @@
|
|||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func canCollectProcess() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
modpsapi = syscall.NewLazyDLL("psapi.dll")
|
||||
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
|
||||
procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
|
||||
)
|
||||
|
||||
type processMemoryCounters struct {
|
||||
// System interface description
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
|
||||
|
||||
// Refer to the Golang internal implementation
|
||||
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
|
||||
_ uint32
|
||||
PageFaultCount uint32
|
||||
PeakWorkingSetSize uintptr
|
||||
WorkingSetSize uintptr
|
||||
QuotaPeakPagedPoolUsage uintptr
|
||||
QuotaPagedPoolUsage uintptr
|
||||
QuotaPeakNonPagedPoolUsage uintptr
|
||||
QuotaNonPagedPoolUsage uintptr
|
||||
PagefileUsage uintptr
|
||||
PeakPagefileUsage uintptr
|
||||
PrivateUsage uintptr
|
||||
}
|
||||
|
||||
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
|
||||
mem := processMemoryCounters{}
|
||||
r1, _, err := procGetProcessMemoryInfo.Call(
|
||||
uintptr(handle),
|
||||
uintptr(unsafe.Pointer(&mem)),
|
||||
uintptr(unsafe.Sizeof(mem)),
|
||||
)
|
||||
if r1 != 1 {
|
||||
return mem, err
|
||||
} else {
|
||||
return mem, nil
|
||||
}
|
||||
}
|
||||
|
||||
func getProcessHandleCount(handle windows.Handle) (uint32, error) {
|
||||
var count uint32
|
||||
r1, _, err := procGetProcessHandleCount.Call(
|
||||
uintptr(handle),
|
||||
uintptr(unsafe.Pointer(&count)),
|
||||
)
|
||||
if r1 != 1 {
|
||||
return 0, err
|
||||
} else {
|
||||
return count, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
h, err := windows.GetCurrentProcess()
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
var startTime, exitTime, kernelTime, userTime windows.Filetime
|
||||
err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
|
||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
|
||||
|
||||
mem, err := getProcessMemoryInfo(h)
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
|
||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
|
||||
|
||||
handles, err := getProcessHandleCount(h)
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
|
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
|
||||
}
|
||||
|
||||
func fileTimeToSeconds(ft windows.Filetime) float64 {
|
||||
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
|
||||
}
|
370 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go (generated, vendored, new file)
@ -0,0 +1,370 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const (
|
||||
closeNotifier = 1 << iota
|
||||
flusher
|
||||
hijacker
|
||||
readerFrom
|
||||
pusher
|
||||
)
|
||||
|
||||
type delegator interface {
|
||||
http.ResponseWriter
|
||||
|
||||
Status() int
|
||||
Written() int64
|
||||
}
|
||||
|
||||
type responseWriterDelegator struct {
|
||||
http.ResponseWriter
|
||||
|
||||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
observeWriteHeader func(int)
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Status() int {
|
||||
return r.status
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Written() int64 {
|
||||
return r.written
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||
if r.observeWriteHeader != nil && !r.wroteHeader {
|
||||
// Only call observeWriteHeader for the 1st time. It's a bug if
|
||||
// WriteHeader is called more than once, but we want to protect
|
||||
// against it here. Note that we still delegate the WriteHeader
|
||||
// to the original ResponseWriter to not mask the bug from it.
|
||||
r.observeWriteHeader(code)
|
||||
}
|
||||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||
// If applicable, call WriteHeader here so that observeWriteHeader is
|
||||
// handled appropriately.
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := r.ResponseWriter.Write(b)
|
||||
r.written += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
type closeNotifierDelegator struct{ *responseWriterDelegator }
|
||||
type flusherDelegator struct{ *responseWriterDelegator }
|
||||
type hijackerDelegator struct{ *responseWriterDelegator }
|
||||
type readerFromDelegator struct{ *responseWriterDelegator }
|
||||
type pusherDelegator struct{ *responseWriterDelegator }
|
||||
|
||||
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
|
||||
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
|
||||
//remove support from client_golang yet.
|
||||
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
func (d flusherDelegator) Flush() {
|
||||
// If applicable, call WriteHeader here so that observeWriteHeader is
|
||||
// handled appropriately.
|
||||
if !d.wroteHeader {
|
||||
d.WriteHeader(http.StatusOK)
|
||||
}
|
||||
d.ResponseWriter.(http.Flusher).Flush()
|
||||
}
|
||||
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return d.ResponseWriter.(http.Hijacker).Hijack()
|
||||
}
|
||||
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
|
||||
// If applicable, call WriteHeader here so that observeWriteHeader is
|
||||
// handled appropriately.
|
||||
if !d.wroteHeader {
|
||||
d.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
|
||||
d.written += n
|
||||
return n, err
|
||||
}
|
||||
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
|
||||
return d.ResponseWriter.(http.Pusher).Push(target, opts)
|
||||
}
|
||||
|
||||
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
|
||||
|
||||
func init() {
|
||||
// TODO(beorn7): Code generation would help here.
|
||||
pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
|
||||
return d
|
||||
}
|
||||
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
|
||||
return closeNotifierDelegator{d}
|
||||
}
|
||||
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
|
||||
return flusherDelegator{d}
|
||||
}
|
||||
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
|
||||
return hijackerDelegator{d}
|
||||
}
|
||||
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
|
||||
return readerFromDelegator{d}
|
||||
}
|
||||
pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.CloseNotifier
|
||||
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Flusher
|
||||
}{d, readerFromDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
|
||||
return pusherDelegator{d}
|
||||
}
|
||||
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Flusher
|
||||
}{d, pusherDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Hijacker
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Flusher
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
}
|
||||
|
||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||
d := &responseWriterDelegator{
|
||||
ResponseWriter: w,
|
||||
observeWriteHeader: observeWriteHeaderFunc,
|
||||
}
|
||||
|
||||
id := 0
|
||||
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
|
||||
//remove support from client_golang yet.
|
||||
if _, ok := w.(http.CloseNotifier); ok {
|
||||
id += closeNotifier
|
||||
}
|
||||
if _, ok := w.(http.Flusher); ok {
|
||||
id += flusher
|
||||
}
|
||||
if _, ok := w.(http.Hijacker); ok {
|
||||
id += hijacker
|
||||
}
|
||||
if _, ok := w.(io.ReaderFrom); ok {
|
||||
id += readerFrom
|
||||
}
|
||||
if _, ok := w.(http.Pusher); ok {
|
||||
id += pusher
|
||||
}
|
||||
|
||||
return pickDelegator[id](d)
|
||||
}
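The delegator id is simply a bitmask over the optional interfaces the wrapped ResponseWriter happens to implement, detected via type assertions as above. A small sketch of that detection step in isolation (httptest.ResponseRecorder is used only as an illustrative writer):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// httptest.ResponseRecorder implements http.Flusher but not http.Hijacker,
	// so only the flusher bit would be set when building the delegator id.
	var w http.ResponseWriter = httptest.NewRecorder()
	_, isFlusher := w.(http.Flusher)
	_, isHijacker := w.(http.Hijacker)
	fmt.Println(isFlusher, isHijacker) // true false
}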
|
379 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (generated, vendored, new file)
@ -0,0 +1,379 @@
|
|||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package promhttp provides tooling around HTTP servers and clients.
|
||||
//
|
||||
// First, the package allows the creation of http.Handler instances to expose
|
||||
// Prometheus metrics via HTTP. promhttp.Handler acts on the
|
||||
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
|
||||
// custom registry or anything that implements the Gatherer interface. It also
|
||||
// allows the creation of handlers that act differently on errors or allow to
|
||||
// log errors.
|
||||
//
|
||||
// Second, the package provides tooling to instrument instances of http.Handler
|
||||
// via middleware. Middleware wrappers follow the naming scheme
|
||||
// InstrumentHandlerX, where X describes the intended use of the middleware.
|
||||
// See each function's doc comment for specific details.
|
||||
//
|
||||
// Finally, the package allows for an http.RoundTripper to be instrumented via
|
||||
// middleware. Middleware wrappers follow the naming scheme
|
||||
// InstrumentRoundTripperX, where X describes the intended use of the
|
||||
// middleware. See each function's doc comment for specific details.
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
)
|
||||
|
||||
var gzipPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return gzip.NewWriter(nil)
|
||||
},
|
||||
}
|
||||
|
||||
// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
|
||||
// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
|
||||
// no error logging, and it applies compression if requested by the client.
|
||||
//
|
||||
// The returned http.Handler is already instrumented using the
|
||||
// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
|
||||
// create multiple http.Handlers by separate calls of the Handler function, the
|
||||
// metrics used for instrumentation will be shared between them, providing
|
||||
// global scrape counts.
|
||||
//
|
||||
// This function is meant to cover the bulk of basic use cases. If you are doing
|
||||
// anything that requires more customization (including using a non-default
|
||||
// Gatherer, different instrumentation, and non-default HandlerOpts), use the
|
||||
// HandlerFor function. See there for details.
|
||||
func Handler() http.Handler {
|
||||
return InstrumentMetricHandler(
|
||||
prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
|
||||
)
|
||||
}
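A minimal server using this default handler might look like the following sketch (the listen address and path are arbitrary):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Expose the DefaultGatherer's metrics; the handler is already
	// instrumented against the DefaultRegisterer as documented above.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}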
|
||||
|
||||
// HandlerFor returns an uninstrumented http.Handler for the provided
|
||||
// Gatherer. The behavior of the Handler is defined by the provided
|
||||
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
|
||||
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
|
||||
// instrumentation. Use the InstrumentMetricHandler function to apply the same
|
||||
// kind of instrumentation as it is used by the Handler function.
|
||||
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
|
||||
var (
|
||||
inFlightSem chan struct{}
|
||||
errCnt = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "promhttp_metric_handler_errors_total",
|
||||
Help: "Total number of internal errors encountered by the promhttp metric handler.",
|
||||
},
|
||||
[]string{"cause"},
|
||||
)
|
||||
)
|
||||
|
||||
if opts.MaxRequestsInFlight > 0 {
|
||||
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
|
||||
}
|
||||
if opts.Registry != nil {
|
||||
// Initialize all possibilities that can occur below.
|
||||
errCnt.WithLabelValues("gathering")
|
||||
errCnt.WithLabelValues("encoding")
|
||||
if err := opts.Registry.Register(errCnt); err != nil {
|
||||
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
|
||||
errCnt = are.ExistingCollector.(*prometheus.CounterVec)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
|
||||
if inFlightSem != nil {
|
||||
select {
|
||||
case inFlightSem <- struct{}{}: // All good, carry on.
|
||||
defer func() { <-inFlightSem }()
|
||||
default:
|
||||
http.Error(rsp, fmt.Sprintf(
|
||||
"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
|
||||
), http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
}
|
||||
mfs, err := reg.Gather()
|
||||
if err != nil {
|
||||
if opts.ErrorLog != nil {
|
||||
opts.ErrorLog.Println("error gathering metrics:", err)
|
||||
}
|
||||
errCnt.WithLabelValues("gathering").Inc()
|
||||
switch opts.ErrorHandling {
|
||||
case PanicOnError:
|
||||
panic(err)
|
||||
case ContinueOnError:
|
||||
if len(mfs) == 0 {
|
||||
// Still report the error if no metrics have been gathered.
|
||||
httpError(rsp, err)
|
||||
return
|
||||
}
|
||||
case HTTPErrorOnError:
|
||||
httpError(rsp, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var contentType expfmt.Format
|
||||
if opts.EnableOpenMetrics {
|
||||
contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
|
||||
} else {
|
||||
contentType = expfmt.Negotiate(req.Header)
|
||||
}
|
||||
header := rsp.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
|
||||
w := io.Writer(rsp)
|
||||
if !opts.DisableCompression && gzipAccepted(req.Header) {
|
||||
header.Set(contentEncodingHeader, "gzip")
|
||||
gz := gzipPool.Get().(*gzip.Writer)
|
||||
defer gzipPool.Put(gz)
|
||||
|
||||
gz.Reset(w)
|
||||
defer gz.Close()
|
||||
|
||||
w = gz
|
||||
}
|
||||
|
||||
enc := expfmt.NewEncoder(w, contentType)
|
||||
|
||||
// handleError handles the error according to opts.ErrorHandling
|
||||
// and returns true if we have to abort after the handling.
|
||||
handleError := func(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
if opts.ErrorLog != nil {
|
||||
opts.ErrorLog.Println("error encoding and sending metric family:", err)
|
||||
}
|
||||
errCnt.WithLabelValues("encoding").Inc()
|
||||
switch opts.ErrorHandling {
|
||||
case PanicOnError:
|
||||
panic(err)
|
||||
case HTTPErrorOnError:
|
||||
// We cannot really send an HTTP error at this
|
||||
// point because we most likely have written
|
||||
// something to rsp already. But at least we can
|
||||
// stop sending.
|
||||
return true
|
||||
}
|
||||
// Do nothing in all other cases, including ContinueOnError.
|
||||
return false
|
||||
}
|
||||
|
||||
for _, mf := range mfs {
|
||||
if handleError(enc.Encode(mf)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if closer, ok := enc.(expfmt.Closer); ok {
|
||||
// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
|
||||
if handleError(closer.Close()) {
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
if opts.Timeout <= 0 {
|
||||
return h
|
||||
}
|
||||
return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
|
||||
"Exceeded configured timeout of %v.\n",
|
||||
opts.Timeout,
|
||||
))
|
||||
}
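A hedged sketch of serving a custom registry through HandlerFor, with the error counter exposed via the Registry option (the collector choice and logger prefix are illustrative):

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector()) // illustrative collector choice

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags), // any Println-style logger works
		ErrorHandling: promhttp.ContinueOnError,                        // serve whatever could be gathered
		Registry:      reg,                                             // exposes promhttp_metric_handler_errors_total
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}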
|
||||
|
||||
// InstrumentMetricHandler is usually used with an http.Handler returned by the
|
||||
// HandlerFor function. It instruments the provided http.Handler with two
|
||||
// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
|
||||
// scrapes partitioned by HTTP status code, and a gauge
|
||||
// "promhttp_metric_handler_requests_in_flight" to track the number of
|
||||
// simultaneous scrapes. This function idempotently registers collectors for
|
||||
// both metrics with the provided Registerer. It panics if the registration
|
||||
// fails. The provided metrics are useful to see how many scrapes hit the
|
||||
// monitored target (which could be from different Prometheus servers or other
|
||||
// scrapers), and how often they overlap (which would result in more than one
|
||||
// scrape in flight at the same time). Note that the scrapes-in-flight gauge
|
||||
// will contain the scrape by which it is exposed, while the scrape counter will
|
||||
// only get incremented after the scrape is complete (as only then the status
|
||||
// code is known). For tracking scrape durations, use the
|
||||
// "scrape_duration_seconds" gauge created by the Prometheus server upon each
|
||||
// scrape.
|
||||
func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
|
||||
cnt := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "promhttp_metric_handler_requests_total",
|
||||
Help: "Total number of scrapes by HTTP status code.",
|
||||
},
|
||||
[]string{"code"},
|
||||
)
|
||||
// Initialize the most likely HTTP status codes.
|
||||
cnt.WithLabelValues("200")
|
||||
cnt.WithLabelValues("500")
|
||||
cnt.WithLabelValues("503")
|
||||
if err := reg.Register(cnt); err != nil {
|
||||
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
|
||||
cnt = are.ExistingCollector.(*prometheus.CounterVec)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
gge := prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "promhttp_metric_handler_requests_in_flight",
|
||||
Help: "Current number of scrapes being served.",
|
||||
})
|
||||
if err := reg.Register(gge); err != nil {
|
||||
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
|
||||
gge = are.ExistingCollector.(prometheus.Gauge)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
|
||||
}
|
||||
|
||||
// HandlerErrorHandling defines how a Handler serving metrics will handle
|
||||
// errors.
|
||||
type HandlerErrorHandling int
|
||||
|
||||
// These constants cause handlers serving metrics to behave as described if
|
||||
// errors are encountered.
|
||||
const (
|
||||
// Serve an HTTP status code 500 upon the first error
|
||||
// encountered. Report the error message in the body. Note that HTTP
|
||||
// errors cannot be served anymore once the beginning of a regular
|
||||
// payload has been sent. Thus, in the (unlikely) case that encoding the
|
||||
// payload into the negotiated wire format fails, serving the response
|
||||
// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
|
||||
// those errors.
|
||||
HTTPErrorOnError HandlerErrorHandling = iota
|
||||
// Ignore errors and try to serve as many metrics as possible. However,
|
||||
// if no metrics can be served, serve an HTTP status code 500 and the
|
||||
// last error message in the body. Only use this in deliberate "best
|
||||
// effort" metrics collection scenarios. In this case, it is highly
|
||||
// recommended to provide other means of detecting errors: By setting an
|
||||
// ErrorLog in HandlerOpts, the errors are logged. By providing a
|
||||
// Registry in HandlerOpts, the exposed metrics include an error counter
|
||||
// "promhttp_metric_handler_errors_total", which can be used for
|
||||
// alerts.
|
||||
ContinueOnError
|
||||
// Panic upon the first error encountered (useful for "crash only" apps).
|
||||
PanicOnError
|
||||
)
|
||||
|
||||
// Logger is the minimal interface HandlerOpts needs for logging. Note that
|
||||
// log.Logger from the standard library implements this interface, and it is
|
||||
// easy to implement by custom loggers, if they don't do so already anyway.
|
||||
type Logger interface {
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
|
||||
// zero value of HandlerOpts is a reasonable default.
|
||||
type HandlerOpts struct {
|
||||
// ErrorLog specifies an optional logger for errors collecting and
|
||||
// serving metrics. If nil, errors are not logged at all.
|
||||
ErrorLog Logger
|
||||
// ErrorHandling defines how errors are handled. Note that errors are
|
||||
// logged regardless of the configured ErrorHandling provided ErrorLog
|
||||
// is not nil.
|
||||
ErrorHandling HandlerErrorHandling
|
||||
// If Registry is not nil, it is used to register a metric
|
||||
// "promhttp_metric_handler_errors_total", partitioned by "cause". A
|
||||
// failed registration causes a panic. Note that this error counter is
|
||||
// different from the instrumentation you get from the various
|
||||
// InstrumentHandler... helpers. It counts errors that don't necessarily
|
||||
// result in a non-2xx HTTP status code. There are two typical cases:
|
||||
// (1) Encoding errors that only happen after streaming of the HTTP body
|
||||
// has already started (and the status code 200 has been sent). This
|
||||
// should only happen with custom collectors. (2) Collection errors with
|
||||
// no effect on the HTTP status code because ErrorHandling is set to
|
||||
// ContinueOnError.
|
||||
Registry prometheus.Registerer
|
||||
// If DisableCompression is true, the handler will never compress the
|
||||
// response, even if requested by the client.
|
||||
DisableCompression bool
|
||||
// The number of concurrent HTTP requests is limited to
|
||||
// MaxRequestsInFlight. Additional requests are responded to with 503
|
||||
// Service Unavailable and a suitable message in the body. If
|
||||
// MaxRequestsInFlight is 0 or negative, no limit is applied.
|
||||
MaxRequestsInFlight int
|
||||
// If handling a request takes longer than Timeout, it is responded to
|
||||
	// with 503 Service Unavailable and a suitable message. No timeout is
|
||||
// applied if Timeout is 0 or negative. Note that with the current
|
||||
// implementation, reaching the timeout simply ends the HTTP requests as
|
||||
// described above (and even that only if sending of the body hasn't
|
||||
// started yet), while the bulk work of gathering all the metrics keeps
|
||||
// running in the background (with the eventual result to be thrown
|
||||
// away). Until the implementation is improved, it is recommended to
|
||||
// implement a separate timeout in potentially slow Collectors.
|
||||
Timeout time.Duration
|
||||
// If true, the experimental OpenMetrics encoding is added to the
|
||||
// possible options during content negotiation. Note that Prometheus
|
||||
// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
|
||||
// the only way to transmit exemplars. However, the move to OpenMetrics
|
||||
// is not completely transparent. Most notably, the values of "quantile"
|
||||
// labels of Summaries and "le" labels of Histograms are formatted with
|
||||
// a trailing ".0" if they would otherwise look like integer numbers
|
||||
// (which changes the identity of the resulting series on the Prometheus
|
||||
// server).
|
||||
EnableOpenMetrics bool
|
||||
}
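As a sketch of combining the remaining options above, the following limits concurrent scrapes, bounds handler time, and opts into OpenMetrics negotiation (all values are arbitrary):

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	opts := promhttp.HandlerOpts{
		MaxRequestsInFlight: 3,               // a 4th concurrent scrape is answered with 503
		Timeout:             5 * time.Second, // slow gathers are cut off with 503
		EnableOpenMetrics:   true,            // offer OpenMetrics during content negotiation
	}
	http.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, opts))
	log.Fatal(http.ListenAndServe(":8080", nil))
}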
|
||||
|
||||
// gzipAccepted returns whether the client will accept gzip-encoded content.
|
||||
func gzipAccepted(header http.Header) bool {
|
||||
a := header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(a, ",")
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// httpError removes any content-encoding header and then calls http.Error with
|
||||
// the provided error and http.StatusInternalServerError. Error contents is
|
||||
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
|
||||
// must not be called if the header or any payload has already been sent.
|
||||
func httpError(rsp http.ResponseWriter, err error) {
|
||||
rsp.Header().Del(contentEncodingHeader)
|
||||
http.Error(
|
||||
rsp,
|
||||
"An error has occurred while serving metrics:\n\n"+err.Error(),
|
||||
http.StatusInternalServerError,
|
||||
)
|
||||
}
|
219 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (generated, vendored, new file)
@ -0,0 +1,219 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// The RoundTripperFunc type is an adapter to allow the use of ordinary
|
||||
// functions as RoundTrippers. If f is a function with the appropriate
|
||||
// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
|
||||
type RoundTripperFunc func(req *http.Request) (*http.Response, error)
|
||||
|
||||
// RoundTrip implements the RoundTripper interface.
|
||||
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
return rt(r)
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
|
||||
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
|
||||
// requests currently handled by the wrapped http.RoundTripper.
|
||||
//
|
||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
gauge.Inc()
|
||||
defer gauge.Dec()
|
||||
return next.RoundTrip(r)
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperCounter is a middleware that wraps the provided
|
||||
// http.RoundTripper to observe the request result with the provided CounterVec.
|
||||
// The CounterVec must have zero, one, or two non-const non-curried labels. For
|
||||
// those, the only allowed label names are "code" and "method". The function
|
||||
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
|
||||
// and/or HTTP method if the respective instance label names are present in the
|
||||
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
|
||||
//
|
||||
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
|
||||
// is not incremented.
|
||||
//
|
||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
|
||||
code, method := checkLabels(counter)
|
||||
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
resp, err := next.RoundTrip(r)
|
||||
if err == nil {
|
||||
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperDuration is a middleware that wraps the provided
|
||||
// http.RoundTripper to observe the request duration with the provided
|
||||
// ObserverVec. The ObserverVec must have zero, one, or two non-const
|
||||
// non-curried labels. For those, the only allowed label names are "code" and
|
||||
// "method". The function panics otherwise. The Observe method of the Observer
|
||||
// in the ObserverVec is called with the request duration in
|
||||
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
|
||||
// respective instance label names are present in the ObserverVec. For
|
||||
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||
// partitioning of Histograms is expensive and should be used judiciously.
|
||||
//
|
||||
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
|
||||
// reported.
|
||||
//
|
||||
// Note that this method is only guaranteed to never observe negative durations
|
||||
// if used with Go1.9+.
|
||||
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
start := time.Now()
|
||||
resp, err := next.RoundTrip(r)
|
||||
if err == nil {
|
||||
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
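A hedged sketch of composing the client-side middleware above around http.DefaultTransport (the metric names and label choices are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "In-flight outgoing HTTP requests.",
	})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "Outgoing HTTP requests by code and method.",
		},
		[]string{"code", "method"},
	)
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "client_request_duration_seconds",
			Help:    "Outgoing request latencies in seconds.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(inFlight, counter, duration)

	// Nesting order: duration wraps the transport, the counter wraps that,
	// and the in-flight gauge is outermost.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
			),
		),
	}
	_, _ = client.Get("https://example.org")
}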
|
||||
|
||||
// InstrumentTrace is used to offer flexibility in instrumenting the available
|
||||
// httptrace.ClientTrace hook functions. Each function is passed a float64
|
||||
// representing the time in seconds since the start of the http request. A user
|
||||
// may choose to use separately bucketed Histograms, or implement custom
|
||||
// instance labels on a per function basis.
|
||||
type InstrumentTrace struct {
|
||||
GotConn func(float64)
|
||||
PutIdleConn func(float64)
|
||||
GotFirstResponseByte func(float64)
|
||||
Got100Continue func(float64)
|
||||
DNSStart func(float64)
|
||||
DNSDone func(float64)
|
||||
ConnectStart func(float64)
|
||||
ConnectDone func(float64)
|
||||
TLSHandshakeStart func(float64)
|
||||
TLSHandshakeDone func(float64)
|
||||
WroteHeaders func(float64)
|
||||
Wait100Continue func(float64)
|
||||
WroteRequest func(float64)
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperTrace is a middleware that wraps the provided
|
||||
// RoundTripper and reports times to hook functions provided in the
|
||||
// InstrumentTrace struct. Hook functions that are not present in the provided
|
||||
// InstrumentTrace struct are ignored. Times reported to the hook functions are
|
||||
// time since the start of the request. Only with Go1.9+, those times are
|
||||
// guaranteed to never be negative. (Earlier Go versions are not using a
|
||||
// monotonic clock.) Note that partitioning of Histograms is expensive and
|
||||
// should be used judiciously.
|
||||
//
|
||||
// For hook functions that receive an error as an argument, no observations are
|
||||
// made in the event of a non-nil error value.
|
||||
//
|
||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
start := time.Now()
|
||||
|
||||
trace := &httptrace.ClientTrace{
|
||||
GotConn: func(_ httptrace.GotConnInfo) {
|
||||
if it.GotConn != nil {
|
||||
it.GotConn(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
PutIdleConn: func(err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if it.PutIdleConn != nil {
|
||||
it.PutIdleConn(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
DNSStart: func(_ httptrace.DNSStartInfo) {
|
||||
if it.DNSStart != nil {
|
||||
it.DNSStart(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
DNSDone: func(_ httptrace.DNSDoneInfo) {
|
||||
if it.DNSDone != nil {
|
||||
it.DNSDone(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
ConnectStart: func(_, _ string) {
|
||||
if it.ConnectStart != nil {
|
||||
it.ConnectStart(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
ConnectDone: func(_, _ string, err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if it.ConnectDone != nil {
|
||||
it.ConnectDone(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
GotFirstResponseByte: func() {
|
||||
if it.GotFirstResponseByte != nil {
|
||||
it.GotFirstResponseByte(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
Got100Continue: func() {
|
||||
if it.Got100Continue != nil {
|
||||
it.Got100Continue(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
TLSHandshakeStart: func() {
|
||||
if it.TLSHandshakeStart != nil {
|
||||
it.TLSHandshakeStart(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if it.TLSHandshakeDone != nil {
|
||||
it.TLSHandshakeDone(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
WroteHeaders: func() {
|
||||
if it.WroteHeaders != nil {
|
||||
it.WroteHeaders(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
Wait100Continue: func() {
|
||||
if it.Wait100Continue != nil {
|
||||
it.Wait100Continue(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
WroteRequest: func(_ httptrace.WroteRequestInfo) {
|
||||
if it.WroteRequest != nil {
|
||||
it.WroteRequest(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
}
|
||||
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
|
||||
|
||||
return next.RoundTrip(r)
|
||||
})
|
||||
}
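A sketch of tracing only the DNS phase with the hooks above; hooks left nil are skipped, and the histogram name and buckets are illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsLatency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "dns_duration_seconds",
			Help:    "Time from request start until DNS events, in seconds.",
			Buckets: []float64{.005, .01, .025, .05},
		},
		[]string{"event"},
	)
	prometheus.MustRegister(dnsLatency)

	trace := &promhttp.InstrumentTrace{
		DNSStart: func(t float64) { dnsLatency.WithLabelValues("dns_start").Observe(t) },
		DNSDone:  func(t float64) { dnsLatency.WithLabelValues("dns_done").Observe(t) },
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	_, _ = client.Get("https://example.org")
}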
|
447 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go (generated, vendored, new file)
@ -0,0 +1,447 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
|
||||
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
|
||||
|
||||
// InstrumentHandlerInFlight is a middleware that wraps the provided
|
||||
// http.Handler. It sets the provided prometheus.Gauge to the number of
|
||||
// requests currently handled by the wrapped http.Handler.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
g.Inc()
|
||||
defer g.Dec()
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerDuration is a middleware that wraps the provided
|
||||
// http.Handler to observe the request duration with the provided ObserverVec.
|
||||
// The ObserverVec must have zero, one, or two non-const non-curried labels. For
|
||||
// those, the only allowed label names are "code" and "method". The function
|
||||
// panics otherwise. The Observe method of the Observer in the ObserverVec is
|
||||
// called with the request duration in seconds. Partitioning happens by HTTP
|
||||
// status code and/or HTTP method if the respective instance label names are
|
||||
// present in the ObserverVec. For unpartitioned observations, use an
|
||||
// ObserverVec with zero labels. Note that partitioning of Histograms is
|
||||
// expensive and should be used judiciously.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, no values are reported.
|
||||
//
|
||||
// Note that this method is only guaranteed to never observe negative durations
|
||||
// if used with Go1.9+.
|
||||
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
|
||||
// to observe the request result with the provided CounterVec. The CounterVec
|
||||
// must have zero, one, or two non-const non-curried labels. For those, the only
|
||||
// allowed label names are "code" and "method". The function panics
|
||||
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
|
||||
// HTTP method if the respective instance label names are present in the
|
||||
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, the Counter is not incremented.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(counter)
|
||||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
counter.With(labels(code, method, r.Method, d.Status())).Inc()
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
counter.With(labels(code, method, r.Method, 0)).Inc()
|
||||
})
|
||||
}
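Server-side, the handler middleware composes the same way as the round-tripper variants; a sketch wrapping an arbitrary handler (metric names are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "http_in_flight_requests",
		Help: "In-flight HTTP requests.",
	})
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request latencies by code and method.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Requests by code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(inFlight, duration, counter)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	// Gauge outermost, then duration, then the counter closest to the handler.
	chain := promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerDuration(duration,
			promhttp.InstrumentHandlerCounter(counter, hello),
		),
	)
	http.Handle("/", chain)
	log.Fatal(http.ListenAndServe(":8080", nil))
}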
|
||||
|
||||
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
|
||||
// http.Handler to observe with the provided ObserverVec the request duration
|
||||
// until the response headers are written. The ObserverVec must have zero, one,
|
||||
// or two non-const non-curried labels. For those, the only allowed label names
|
||||
// are "code" and "method". The function panics otherwise. The Observe method of
|
||||
// the Observer in the ObserverVec is called with the request duration in
|
||||
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
|
||||
// respective instance label names are present in the ObserverVec. For
|
||||
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||
// partitioning of Histograms is expensive and should be used judiciously.
|
||||
//
|
||||
// If the wrapped Handler panics before calling WriteHeader, no value is
|
||||
// reported.
|
||||
//
|
||||
// Note that this method is only guaranteed to never observe negative durations
|
||||
// if used with Go1.9+.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
d := newDelegator(w, func(status int) {
|
||||
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
|
||||
})
|
||||
next.ServeHTTP(d, r)
|
||||
})
|
||||
}
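Because this variant observes via the WriteHeader callback, a sketch only needs a handler that writes headers; the metric name, buckets, and label choice below are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	ttwh := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_time_to_write_header_seconds",
			Help:    "Time until response headers are written.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(ttwh)

	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK) // the observation fires here
		w.Write([]byte("done"))
	})
	http.Handle("/slow", promhttp.InstrumentHandlerTimeToWriteHeader(ttwh, slow))
	log.Fatal(http.ListenAndServe(":8080", nil))
}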
|
||||
|
||||
// InstrumentHandlerRequestSize is a middleware that wraps the provided
|
||||
// http.Handler to observe the request size with the provided ObserverVec. The
|
||||
// ObserverVec must have zero, one, or two non-const non-curried labels. For
|
||||
// those, the only allowed label names are "code" and "method". The function
|
||||
// panics otherwise. The Observe method of the Observer in the ObserverVec is
|
||||
// called with the request size in bytes. Partitioning happens by HTTP status
|
||||
// code and/or HTTP method if the respective instance label names are present in
|
||||
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
|
||||
// labels. Note that partitioning of Histograms is expensive and should be used
|
||||
// judiciously.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, no values are reported.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
size := computeApproximateRequestSize(r)
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
size := computeApproximateRequestSize(r)
|
||||
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerResponseSize is a middleware that wraps the provided
|
||||
// http.Handler to observe the response size with the provided ObserverVec. The
|
||||
// ObserverVec must have zero, one, or two non-const non-curried labels. For
|
||||
// those, the only allowed label names are "code" and "method". The function
|
||||
// panics otherwise. The Observe method of the Observer in the ObserverVec is
|
||||
// called with the response size in bytes. Partitioning happens by HTTP status
|
||||
// code and/or HTTP method if the respective instance label names are present in
|
||||
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
|
||||
// labels. Note that partitioning of Histograms is expensive and should be used
|
||||
// judiciously.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, no values are reported.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
|
||||
code, method := checkLabels(obs)
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
|
||||
})
|
||||
}
|
||||
|
||||
func checkLabels(c prometheus.Collector) (code bool, method bool) {
|
||||
// TODO(beorn7): Remove this hacky way to check for instance labels
|
||||
// once Descriptors can have their dimensionality queried.
|
||||
var (
|
||||
desc *prometheus.Desc
|
||||
m prometheus.Metric
|
||||
pm dto.Metric
|
||||
lvs []string
|
||||
)
|
||||
|
||||
// Get the Desc from the Collector.
|
||||
descc := make(chan *prometheus.Desc, 1)
|
||||
c.Describe(descc)
|
||||
|
||||
select {
|
||||
case desc = <-descc:
|
||||
default:
|
||||
panic("no description provided by collector")
|
||||
}
|
||||
select {
|
||||
case <-descc:
|
||||
panic("more than one description provided by collector")
|
||||
default:
|
||||
}
|
||||
|
||||
close(descc)
|
||||
|
||||
// Create a ConstMetric with the Desc. Since we don't know how many
|
||||
// variable labels there are, try for as long as it needs.
|
||||
for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
|
||||
m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
|
||||
}
|
||||
|
||||
// Write out the metric into a proto message and look at the labels.
|
||||
// If the value is not the magicString, it is a constLabel, which doesn't interest us.
|
||||
// If the label is curried, it doesn't interest us.
|
||||
// In all other cases, only "code" or "method" is allowed.
|
||||
if err := m.Write(&pm); err != nil {
|
||||
panic("error checking metric for labels")
|
||||
}
|
||||
for _, label := range pm.Label {
|
||||
name, value := label.GetName(), label.GetValue()
|
||||
if value != magicString || isLabelCurried(c, name) {
|
||||
continue
|
||||
}
|
||||
switch name {
|
||||
case "code":
|
||||
code = true
|
||||
case "method":
|
||||
method = true
|
||||
default:
|
||||
panic("metric partitioned with non-supported labels")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func isLabelCurried(c prometheus.Collector, label string) bool {
|
||||
// This is even hackier than the label test above.
|
||||
// We essentially try to curry again and see if it works.
|
||||
// But for that, we need to type-convert to the two
|
||||
// types we use here, ObserverVec or *CounterVec.
|
||||
switch v := c.(type) {
|
||||
case *prometheus.CounterVec:
|
||||
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
|
||||
return false
|
||||
}
|
||||
case prometheus.ObserverVec:
|
||||
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
panic("unsupported metric vec type")
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
|
||||
// unnecessary allocations on each request.
|
||||
var emptyLabels = prometheus.Labels{}
|
||||
|
||||
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
|
||||
if !(code || method) {
|
||||
return emptyLabels
|
||||
}
|
||||
labels := prometheus.Labels{}
|
||||
|
||||
if code {
|
||||
labels["code"] = sanitizeCode(status)
|
||||
}
|
||||
if method {
|
||||
labels["method"] = sanitizeMethod(reqMethod)
|
||||
}
|
||||
|
||||
return labels
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request) int {
|
||||
s := 0
|
||||
if r.URL != nil {
|
||||
s += len(r.URL.String())
|
||||
}
|
||||
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func sanitizeMethod(m string) string {
|
||||
switch m {
|
||||
case "GET", "get":
|
||||
return "get"
|
||||
case "PUT", "put":
|
||||
return "put"
|
||||
case "HEAD", "head":
|
||||
return "head"
|
||||
case "POST", "post":
|
||||
return "post"
|
||||
case "DELETE", "delete":
|
||||
return "delete"
|
||||
case "CONNECT", "connect":
|
||||
return "connect"
|
||||
case "OPTIONS", "options":
|
||||
return "options"
|
||||
case "NOTIFY", "notify":
|
||||
return "notify"
|
||||
default:
|
||||
return strings.ToLower(m)
|
||||
}
|
||||
}
|
||||
|
||||
// If the wrapped http.Handler has not set a status code, i.e. the value is
|
||||
// currently 0, sanitizeCode will return 200, for consistency with behavior in
|
||||
// the stdlib.
|
||||
func sanitizeCode(s int) string {
|
||||
switch s {
|
||||
case 100:
|
||||
return "100"
|
||||
case 101:
|
||||
return "101"
|
||||
|
||||
case 200, 0:
|
||||
return "200"
|
||||
case 201:
|
||||
return "201"
|
||||
case 202:
|
||||
return "202"
|
||||
case 203:
|
||||
return "203"
|
||||
case 204:
|
||||
return "204"
|
||||
case 205:
|
||||
return "205"
|
||||
case 206:
|
||||
return "206"
|
||||
|
||||
case 300:
|
||||
return "300"
|
||||
case 301:
|
||||
return "301"
|
||||
case 302:
|
||||
return "302"
|
||||
case 304:
|
||||
return "304"
|
||||
case 305:
|
||||
return "305"
|
||||
case 307:
|
||||
return "307"
|
||||
|
||||
case 400:
|
||||
return "400"
|
||||
case 401:
|
||||
return "401"
|
||||
case 402:
|
||||
return "402"
|
||||
case 403:
|
||||
return "403"
|
||||
case 404:
|
||||
return "404"
|
||||
case 405:
|
||||
return "405"
|
||||
case 406:
|
||||
return "406"
|
||||
case 407:
|
||||
return "407"
|
||||
case 408:
|
||||
return "408"
|
||||
case 409:
|
||||
return "409"
|
||||
case 410:
|
||||
return "410"
|
||||
case 411:
|
||||
return "411"
|
||||
case 412:
|
||||
return "412"
|
||||
case 413:
|
||||
return "413"
|
||||
case 414:
|
||||
return "414"
|
||||
case 415:
|
||||
return "415"
|
||||
case 416:
|
||||
return "416"
|
||||
case 417:
|
||||
return "417"
|
||||
case 418:
|
||||
return "418"
|
||||
|
||||
case 500:
|
||||
return "500"
|
||||
case 501:
|
||||
return "501"
|
||||
case 502:
|
||||
return "502"
|
||||
case 503:
|
||||
return "503"
|
||||
case 504:
|
||||
return "504"
|
||||
case 505:
|
||||
return "505"
|
||||
|
||||
case 428:
|
||||
return "428"
|
||||
case 429:
|
||||
return "429"
|
||||
case 431:
|
||||
return "431"
|
||||
case 511:
|
||||
return "511"
|
||||
|
||||
default:
|
||||
return strconv.Itoa(s)
|
||||
}
|
||||
}
|
|
@ -15,15 +15,23 @@ package prometheus
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -35,13 +43,14 @@ const (
|
|||
// DefaultRegisterer and DefaultGatherer are the implementations of the
|
||||
// Registerer and Gatherer interface a number of convenience functions in this
|
||||
// package act on. Initially, both variables point to the same Registry, which
|
||||
// has a process collector (see NewProcessCollector) and a Go collector (see
|
||||
// NewGoCollector) already registered. This approach to keep default instances
|
||||
// as global state mirrors the approach of other packages in the Go standard
|
||||
// library. Note that there are caveats. Change the variables with caution and
|
||||
// only if you understand the consequences. Users who want to avoid global state
|
||||
// altogether should not use the convenience function and act on custom
|
||||
// instances instead.
|
||||
// has a process collector (currently on Linux only, see NewProcessCollector)
|
||||
// and a Go collector (see NewGoCollector, in particular the note about
|
||||
// stop-the-world implication with Go versions older than 1.9) already
|
||||
// registered. This approach to keep default instances as global state mirrors
|
||||
// the approach of other packages in the Go standard library. Note that there
|
||||
// are caveats. Change the variables with caution and only if you understand the
|
||||
// consequences. Users who want to avoid global state altogether should not use
|
||||
// the convenience functions and act on custom instances instead.
|
||||
var (
|
||||
defaultRegistry = NewRegistry()
|
||||
DefaultRegisterer Registerer = defaultRegistry
|
||||
|
@ -49,7 +58,7 @@ var (
|
|||
)
|
||||
|
||||
func init() {
|
||||
MustRegister(NewProcessCollector(os.Getpid(), ""))
|
||||
MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
|
||||
MustRegister(NewGoCollector())
|
||||
}
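The init above wires a process collector and a Go collector into the default registry, which is what the package-level convenience functions act on. Below is a minimal caller-side sketch contrasting the two approaches described in the comment; it is not part of this diff and the metric name is invented.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Total requests handled.",
	})

	// Convenience path: acts on the package-level DefaultRegisterer.
	prometheus.MustRegister(requests)

	// Avoiding global state: register with a dedicated Registry instead.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())
}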
|
||||
|
||||
|
@ -65,7 +74,8 @@ func NewRegistry() *Registry {
|
|||
|
||||
// NewPedanticRegistry returns a registry that checks during collection if each
|
||||
// collected Metric is consistent with its reported Desc, and if the Desc has
|
||||
// actually been registered with the registry.
|
||||
// actually been registered with the registry. Unchecked Collectors (those whose
|
||||
// Describe method does not yield any descriptors) are excluded from the check.
|
||||
//
|
||||
// Usually, a Registry will be happy as long as the union of all collected
|
||||
// Metrics is consistent and valid even if some metrics are not consistent with
|
||||
|
@ -80,7 +90,7 @@ func NewPedanticRegistry() *Registry {
|
|||
|
||||
// Registerer is the interface for the part of a registry in charge of
|
||||
// registering and unregistering. Users of custom registries should use
|
||||
// Registerer as type for registration purposes (rather then the Registry type
|
||||
// Registerer as type for registration purposes (rather than the Registry type
|
||||
// directly). In that way, they are free to use custom Registerer implementation
|
||||
// (e.g. for testing purposes).
|
||||
type Registerer interface {
|
||||
|
@ -95,8 +105,13 @@ type Registerer interface {
|
|||
// returned error is an instance of AlreadyRegisteredError, which
|
||||
// contains the previously registered Collector.
|
||||
//
|
||||
// It is in general not safe to register the same Collector multiple
|
||||
// times concurrently.
|
||||
// A Collector whose Describe method does not yield any Desc is treated
|
||||
// as unchecked. Registration will always succeed. No check for
|
||||
// re-registering (see previous paragraph) is performed. Thus, the
|
||||
// caller is responsible for not double-registering the same unchecked
|
||||
// Collector, and for providing a Collector that will not cause
|
||||
// inconsistent metrics on collection. (This would lead to scrape
|
||||
// errors.)
|
||||
Register(Collector) error
|
||||
// MustRegister works like Register but registers any number of
|
||||
// Collectors and panics upon the first registration that causes an
|
||||
|
@ -105,7 +120,9 @@ type Registerer interface {
|
|||
// Unregister unregisters the Collector that equals the Collector passed
|
||||
// in as an argument. (Two Collectors are considered equal if their
|
||||
// Describe method yields the same set of descriptors.) The function
|
||||
// returns whether a Collector was unregistered.
|
||||
// returns whether a Collector was unregistered. Note that an unchecked
|
||||
// Collector cannot be unregistered (as its Describe method does not
|
||||
// yield any descriptor).
|
||||
//
|
||||
// Note that even after unregistering, it will not be possible to
|
||||
// register a new Collector that is inconsistent with the unregistered
|
||||
|
@ -123,15 +140,23 @@ type Registerer interface {
|
|||
type Gatherer interface {
|
||||
// Gather calls the Collect method of the registered Collectors and then
|
||||
// gathers the collected metrics into a lexicographically sorted slice
|
||||
// of MetricFamily protobufs. Even if an error occurs, Gather attempts
|
||||
// to gather as many metrics as possible. Hence, if a non-nil error is
|
||||
// returned, the returned MetricFamily slice could be nil (in case of a
|
||||
// fatal error that prevented any meaningful metric collection) or
|
||||
// contain a number of MetricFamily protobufs, some of which might be
|
||||
// incomplete, and some might be missing altogether. The returned error
|
||||
// (which might be a MultiError) explains the details. In scenarios
|
||||
// where complete collection is critical, the returned MetricFamily
|
||||
// protobufs should be disregarded if the returned error is non-nil.
|
||||
// of uniquely named MetricFamily protobufs. Gather ensures that the
|
||||
// returned slice is valid and self-consistent so that it can be used
|
||||
// for valid exposition. As an exception to the strict consistency
|
||||
// requirements described for metric.Desc, Gather will tolerate
|
||||
// different sets of label names for metrics of the same metric family.
|
||||
//
|
||||
// Even if an error occurs, Gather attempts to gather as many metrics as
|
||||
// possible. Hence, if a non-nil error is returned, the returned
|
||||
// MetricFamily slice could be nil (in case of a fatal error that
|
||||
// prevented any meaningful metric collection) or contain a number of
|
||||
// MetricFamily protobufs, some of which might be incomplete, and some
|
||||
// might be missing altogether. The returned error (which might be a
|
||||
// MultiError) explains the details. Note that this is mostly useful for
|
||||
// debugging purposes. If the gathered protobufs are to be used for
|
||||
// exposition in actual monitoring, it is almost always better to not
|
||||
// expose an incomplete result and instead disregard the returned
|
||||
// MetricFamily protobufs in case the returned error is non-nil.
|
||||
Gather() ([]*dto.MetricFamily, error)
|
||||
}
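A hedged caller-side sketch of consuming Gather directly, illustrating the advice above to discard partially gathered families when the error is non-nil; it assumes the usual fmt, log, and client_golang imports and is not part of this diff.

mfs, err := prometheus.DefaultGatherer.Gather()
if err != nil {
	// Possibly a MultiError; for real exposition, prefer discarding the
	// partial result rather than exposing it.
	log.Println("gather:", err)
	return
}
for _, mf := range mfs {
	fmt.Println(mf.GetName(), mf.GetType(), len(mf.GetMetric()))
}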
|
||||
|
||||
|
@ -152,38 +177,6 @@ func MustRegister(cs ...Collector) {
|
|||
DefaultRegisterer.MustRegister(cs...)
|
||||
}
|
||||
|
||||
// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
|
||||
// returns the Collector, unless an equal Collector was registered before, in
|
||||
// which case that Collector is returned.
|
||||
//
|
||||
// Deprecated: RegisterOrGet is merely a convenience function for the
|
||||
// implementation as described in the documentation for
|
||||
// AlreadyRegisteredError. As the use case is relatively rare, this function
|
||||
// will be removed in a future version of this package to clean up the
|
||||
// namespace.
|
||||
func RegisterOrGet(c Collector) (Collector, error) {
|
||||
if err := Register(c); err != nil {
|
||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
||||
return are.ExistingCollector, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
|
||||
// an error.
|
||||
//
|
||||
// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
|
||||
// there for details.
|
||||
func MustRegisterOrGet(c Collector) Collector {
|
||||
c, err := RegisterOrGet(c)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Unregister removes the registration of the provided Collector from the
|
||||
// DefaultRegisterer.
|
||||
//
|
||||
|
@ -201,25 +194,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
|
|||
return gf()
|
||||
}
|
||||
|
||||
// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
|
||||
// gathers from the previous DefaultGatherers but then merges the MetricFamily
|
||||
// protobufs returned from the provided hook function with the MetricFamily
|
||||
// protobufs returned from the original DefaultGatherer.
|
||||
//
|
||||
// Deprecated: This function manipulates the DefaultGatherer variable. Consider
|
||||
// the implications, i.e. don't do this concurrently with any uses of the
|
||||
// DefaultGatherer. In the rare cases where you need to inject MetricFamily
|
||||
// protobufs directly, it is recommended to use a custom Registry and combine it
|
||||
// with a custom Gatherer using the Gatherers type (see
|
||||
// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
|
||||
// with previous versions of this package.
|
||||
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
|
||||
DefaultGatherer = Gatherers{
|
||||
DefaultGatherer,
|
||||
GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
|
||||
}
|
||||
}
|
||||
|
||||
// AlreadyRegisteredError is returned by the Register method if the Collector to
|
||||
// be registered has already been registered before, or a different Collector
|
||||
// that collects the same metrics has been registered before. Registration fails
|
||||
|
@ -252,6 +226,13 @@ func (errs MultiError) Error() string {
|
|||
return buf.String()
|
||||
}
|
||||
|
||||
// Append appends the provided error if it is not nil.
|
||||
func (errs *MultiError) Append(err error) {
|
||||
if err != nil {
|
||||
*errs = append(*errs, err)
|
||||
}
|
||||
}
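A small caller-side sketch of the Append/MaybeUnwrap pattern; checkA and checkB are hypothetical helpers returning error, and the snippet is illustrative rather than part of the vendored code.

func validate() error {
	var errs prometheus.MultiError
	errs.Append(checkA())
	errs.Append(checkB())
	// nil if nothing failed, the single error if exactly one did,
	// otherwise the full MultiError.
	return errs.MaybeUnwrap()
}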
|
||||
|
||||
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
|
||||
// contained error as error if len(errs is 1). In all other cases, it returns
|
||||
// the MultiError directly. This is helpful for returning a MultiError in a way
|
||||
|
@ -276,6 +257,7 @@ type Registry struct {
|
|||
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
||||
descIDs map[uint64]struct{}
|
||||
dimHashesByName map[string]uint64
|
||||
uncheckedCollectors []Collector
|
||||
pedanticChecksEnabled bool
|
||||
}
|
||||
|
||||
|
@ -285,7 +267,7 @@ func (r *Registry) Register(c Collector) error {
|
|||
descChan = make(chan *Desc, capDescChan)
|
||||
newDescIDs = map[uint64]struct{}{}
|
||||
newDimHashesByName = map[string]uint64{}
|
||||
collectorID uint64 // Just a sum of all desc IDs.
|
||||
collectorID uint64 // All desc IDs XOR'd together.
|
||||
duplicateDescErr error
|
||||
)
|
||||
go func() {
|
||||
|
@ -293,8 +275,13 @@ func (r *Registry) Register(c Collector) error {
|
|||
close(descChan)
|
||||
}()
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
// Coduct various tests...
|
||||
defer func() {
|
||||
// Drain channel in case of premature return to not leak a goroutine.
|
||||
for range descChan {
|
||||
}
|
||||
r.mtx.Unlock()
|
||||
}()
|
||||
// Conduct various tests...
|
||||
for desc := range descChan {
|
||||
|
||||
// Is the descriptor valid at all?
|
||||
|
@ -307,12 +294,12 @@ func (r *Registry) Register(c Collector) error {
|
|||
if _, exists := r.descIDs[desc.id]; exists {
|
||||
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
|
||||
}
|
||||
// If it is not a duplicate desc in this collector, add it to
|
||||
// If it is not a duplicate desc in this collector, XOR it to
|
||||
// the collectorID. (We allow duplicate descs within the same
|
||||
// collector, but their existence must be a no-op.)
|
||||
if _, exists := newDescIDs[desc.id]; !exists {
|
||||
newDescIDs[desc.id] = struct{}{}
|
||||
collectorID += desc.id
|
||||
collectorID ^= desc.id
|
||||
}
|
||||
|
||||
// Are all the label names and the help string consistent with
|
||||
|
@ -333,15 +320,24 @@ func (r *Registry) Register(c Collector) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
// Did anything happen at all?
|
||||
// A Collector yielding no Desc at all is considered unchecked.
|
||||
if len(newDescIDs) == 0 {
|
||||
return errors.New("collector has no descriptors")
|
||||
r.uncheckedCollectors = append(r.uncheckedCollectors, c)
|
||||
return nil
|
||||
}
|
||||
if existing, exists := r.collectorsByID[collectorID]; exists {
|
||||
switch e := existing.(type) {
|
||||
case *wrappingCollector:
|
||||
return AlreadyRegisteredError{
|
||||
ExistingCollector: existing,
|
||||
ExistingCollector: e.unwrapRecursively(),
|
||||
NewCollector: c,
|
||||
}
|
||||
default:
|
||||
return AlreadyRegisteredError{
|
||||
ExistingCollector: e,
|
||||
NewCollector: c,
|
||||
}
|
||||
}
|
||||
}
|
||||
// If the collectorID is new, but at least one of the descs existed
|
||||
// before, we are in trouble.
|
||||
|
@ -365,7 +361,7 @@ func (r *Registry) Unregister(c Collector) bool {
|
|||
var (
|
||||
descChan = make(chan *Desc, capDescChan)
|
||||
descIDs = map[uint64]struct{}{}
|
||||
collectorID uint64 // Just a sum of the desc IDs.
|
||||
collectorID uint64 // All desc IDs XOR'd together.
|
||||
)
|
||||
go func() {
|
||||
c.Describe(descChan)
|
||||
|
@ -373,7 +369,7 @@ func (r *Registry) Unregister(c Collector) bool {
|
|||
}()
|
||||
for desc := range descChan {
|
||||
if _, exists := descIDs[desc.id]; !exists {
|
||||
collectorID += desc.id
|
||||
collectorID ^= desc.id
|
||||
descIDs[desc.id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
@ -409,31 +405,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
|
|||
// Gather implements Gatherer.
|
||||
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
||||
var (
|
||||
metricChan = make(chan Metric, capMetricChan)
|
||||
checkedMetricChan = make(chan Metric, capMetricChan)
|
||||
uncheckedMetricChan = make(chan Metric, capMetricChan)
|
||||
metricHashes = map[uint64]struct{}{}
|
||||
dimHashes = map[string]uint64{}
|
||||
wg sync.WaitGroup
|
||||
errs MultiError // The collected errors to return in the end.
|
||||
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
||||
)
|
||||
|
||||
r.mtx.RLock()
|
||||
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
|
||||
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
||||
|
||||
// Scatter.
|
||||
// (Collectors could be complex and slow, so we call them all at once.)
|
||||
wg.Add(len(r.collectorsByID))
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(metricChan)
|
||||
}()
|
||||
checkedCollectors := make(chan Collector, len(r.collectorsByID))
|
||||
uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
|
||||
for _, collector := range r.collectorsByID {
|
||||
go func(collector Collector) {
|
||||
defer wg.Done()
|
||||
collector.Collect(metricChan)
|
||||
}(collector)
|
||||
checkedCollectors <- collector
|
||||
}
|
||||
for _, collector := range r.uncheckedCollectors {
|
||||
uncheckedCollectors <- collector
|
||||
}
|
||||
|
||||
// In case pedantic checks are enabled, we have to copy the map before
|
||||
// giving up the RLock.
|
||||
if r.pedanticChecksEnabled {
|
||||
|
@ -442,83 +432,218 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
|||
registeredDescIDs[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
r.mtx.RUnlock()
|
||||
|
||||
// Drain metricChan in case of premature return.
|
||||
wg.Add(goroutineBudget)
|
||||
|
||||
collectWorker := func() {
|
||||
for {
|
||||
select {
|
||||
case collector := <-checkedCollectors:
|
||||
collector.Collect(checkedMetricChan)
|
||||
case collector := <-uncheckedCollectors:
|
||||
collector.Collect(uncheckedMetricChan)
|
||||
default:
|
||||
return
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
}
|
||||
|
||||
// Start the first worker now to make sure at least one is running.
|
||||
go collectWorker()
|
||||
goroutineBudget--
|
||||
|
||||
// Close checkedMetricChan and uncheckedMetricChan once all collectors
|
||||
// are collected.
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(checkedMetricChan)
|
||||
close(uncheckedMetricChan)
|
||||
}()
|
||||
|
||||
// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
|
||||
defer func() {
|
||||
for _ = range metricChan {
|
||||
if checkedMetricChan != nil {
|
||||
for range checkedMetricChan {
|
||||
}
|
||||
}
|
||||
if uncheckedMetricChan != nil {
|
||||
for range uncheckedMetricChan {
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Gather.
|
||||
for metric := range metricChan {
|
||||
// This could be done concurrently, too, but it required locking
|
||||
// of metricFamiliesByName (and of metricHashes if checks are
|
||||
// enabled). Most likely not worth it.
|
||||
// Copy the channel references so we can nil them out later to remove
|
||||
// them from the select statements below.
|
||||
cmc := checkedMetricChan
|
||||
umc := uncheckedMetricChan
|
||||
|
||||
for {
|
||||
select {
|
||||
case metric, ok := <-cmc:
|
||||
if !ok {
|
||||
cmc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
registeredDescIDs,
|
||||
))
|
||||
case metric, ok := <-umc:
|
||||
if !ok {
|
||||
umc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
nil,
|
||||
))
|
||||
default:
|
||||
if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
|
||||
// All collectors are already being worked on or
|
||||
// we have already as many goroutines started as
|
||||
// there are collectors. Do the same as above,
|
||||
// just without the default.
|
||||
select {
|
||||
case metric, ok := <-cmc:
|
||||
if !ok {
|
||||
cmc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
registeredDescIDs,
|
||||
))
|
||||
case metric, ok := <-umc:
|
||||
if !ok {
|
||||
umc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
nil,
|
||||
))
|
||||
}
|
||||
break
|
||||
}
|
||||
// Start more workers.
|
||||
go collectWorker()
|
||||
goroutineBudget--
|
||||
runtime.Gosched()
|
||||
}
|
||||
// Once both checkedMetricChan and uncheckedMetricChan are closed
|
||||
// and drained, the contraption above will nil out cmc and umc,
|
||||
// and then we can leave the collect loop here.
|
||||
if cmc == nil && umc == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
}
|
||||
|
||||
// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
|
||||
// Prometheus text format, and writes it to a temporary file. Upon success, the
|
||||
// temporary file is renamed to the provided filename.
|
||||
//
|
||||
// This is intended for use with the textfile collector of the node exporter.
|
||||
// Note that the node exporter expects the filename to be suffixed with ".prom".
|
||||
func WriteToTextfile(filename string, g Gatherer) error {
|
||||
tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tmp.Name())
|
||||
|
||||
mfs, err := g.Gather()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, mf := range mfs {
|
||||
if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tmp.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chmod(tmp.Name(), 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(tmp.Name(), filename)
|
||||
}
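A hedged usage sketch for WriteToTextfile with the node exporter's textfile collector; the path is an example and should match whatever --collector.textfile.directory points at, and the snippet assumes the usual log and client_golang imports.

if err := prometheus.WriteToTextfile(
	"/var/lib/node_exporter/textfile/myjob.prom",
	prometheus.DefaultGatherer,
); err != nil {
	log.Fatal(err)
}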
|
||||
|
||||
// processMetric is an internal helper method only used by the Gather method.
|
||||
func processMetric(
|
||||
metric Metric,
|
||||
metricFamiliesByName map[string]*dto.MetricFamily,
|
||||
metricHashes map[uint64]struct{},
|
||||
registeredDescIDs map[uint64]struct{},
|
||||
) error {
|
||||
desc := metric.Desc()
|
||||
// Wrapped metrics collected by an unchecked Collector can have an
|
||||
// invalid Desc.
|
||||
if desc.err != nil {
|
||||
return desc.err
|
||||
}
|
||||
dtoMetric := &dto.Metric{}
|
||||
if err := metric.Write(dtoMetric); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"error collecting metric %v: %s", desc, err,
|
||||
))
|
||||
continue
|
||||
return fmt.Errorf("error collecting metric %v: %s", desc, err)
|
||||
}
|
||||
metricFamily, ok := metricFamiliesByName[desc.fqName]
|
||||
if ok {
|
||||
if ok { // Existing name.
|
||||
if metricFamily.GetHelp() != desc.help {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s has help %q but should have %q",
|
||||
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
|
||||
))
|
||||
continue
|
||||
)
|
||||
}
|
||||
// TODO(beorn7): Simplify switch once Desc has type.
|
||||
switch metricFamily.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
if dtoMetric.Counter == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Counter",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
)
|
||||
}
|
||||
case dto.MetricType_GAUGE:
|
||||
if dtoMetric.Gauge == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Gauge",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
)
|
||||
}
|
||||
case dto.MetricType_SUMMARY:
|
||||
if dtoMetric.Summary == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Summary",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
)
|
||||
}
|
||||
case dto.MetricType_UNTYPED:
|
||||
if dtoMetric.Untyped == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be Untyped",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
)
|
||||
}
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if dtoMetric.Histogram == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Histogram",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
)
|
||||
}
|
||||
default:
|
||||
panic("encountered MetricFamily with invalid type")
|
||||
}
|
||||
} else {
|
||||
} else { // New name.
|
||||
metricFamily = &dto.MetricFamily{}
|
||||
metricFamily.Name = proto.String(desc.fqName)
|
||||
metricFamily.Help = proto.String(desc.help)
|
||||
|
@ -535,40 +660,36 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
|||
case dtoMetric.Histogram != nil:
|
||||
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
|
||||
default:
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"empty metric collected: %s", dtoMetric,
|
||||
))
|
||||
continue
|
||||
return fmt.Errorf("empty metric collected: %s", dtoMetric)
|
||||
}
|
||||
if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
|
||||
return err
|
||||
}
|
||||
metricFamiliesByName[desc.fqName] = metricFamily
|
||||
}
|
||||
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.pedanticChecksEnabled {
|
||||
if registeredDescIDs != nil {
|
||||
// Is the desc registered at all?
|
||||
if _, exist := registeredDescIDs[desc.id]; !exist {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s with unregistered descriptor %s",
|
||||
metricFamily.GetName(), dtoMetric, desc,
|
||||
))
|
||||
continue
|
||||
)
|
||||
}
|
||||
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
return err
|
||||
}
|
||||
}
|
||||
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
|
||||
}
|
||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gatherers is a slice of Gatherer instances that implements the Gatherer
|
||||
// interface itself. Its Gather method calls Gather on all Gatherers in the
|
||||
// slice in order and returns the merged results. Errors returned from the
|
||||
// Gather calles are all returned in a flattened MultiError. Duplicate and
|
||||
// Gather calls are all returned in a flattened MultiError. Duplicate and
|
||||
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
|
||||
// reported in the returned error.
|
||||
//
|
||||
|
@ -588,7 +709,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
|||
var (
|
||||
metricFamiliesByName = map[string]*dto.MetricFamily{}
|
||||
metricHashes = map[uint64]struct{}{}
|
||||
dimHashes = map[string]uint64{}
|
||||
errs MultiError // The collected errors to return in the end.
|
||||
)
|
||||
|
||||
|
@ -625,10 +745,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
|||
existingMF.Name = mf.Name
|
||||
existingMF.Help = mf.Help
|
||||
existingMF.Type = mf.Type
|
||||
if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
metricFamiliesByName[mf.GetName()] = existingMF
|
||||
}
|
||||
for _, m := range mf.Metric {
|
||||
if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
|
||||
if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
|
@ -636,88 +760,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
|||
}
|
||||
}
|
||||
}
|
||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
}
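A brief caller-side sketch of the Gatherers type described above, merging the default gatherer with a custom registry for exposition; promhttp is assumed to be imported from client_golang, and the snippet is illustrative only.

reg := prometheus.NewRegistry()
// ... register additional collectors with reg ...
combined := prometheus.Gatherers{prometheus.DefaultGatherer, reg}
http.Handle("/metrics", promhttp.HandlerFor(combined, promhttp.HandlerOpts{}))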
|
||||
|
||||
// metricSorter is a sortable slice of *dto.Metric.
|
||||
type metricSorter []*dto.Metric
|
||||
|
||||
func (s metricSorter) Len() int {
|
||||
return len(s)
|
||||
// checkSuffixCollisions checks for collisions with the “magic” suffixes the
|
||||
// Prometheus text format and the internal metric representation of the
|
||||
// Prometheus server add while flattening Summaries and Histograms.
|
||||
func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
|
||||
var (
|
||||
newName = mf.GetName()
|
||||
newType = mf.GetType()
|
||||
newNameWithoutSuffix = ""
|
||||
)
|
||||
switch {
|
||||
case strings.HasSuffix(newName, "_count"):
|
||||
newNameWithoutSuffix = newName[:len(newName)-6]
|
||||
case strings.HasSuffix(newName, "_sum"):
|
||||
newNameWithoutSuffix = newName[:len(newName)-4]
|
||||
case strings.HasSuffix(newName, "_bucket"):
|
||||
newNameWithoutSuffix = newName[:len(newName)-7]
|
||||
}
|
||||
|
||||
func (s metricSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
if newNameWithoutSuffix != "" {
|
||||
if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
|
||||
switch existingMF.GetType() {
|
||||
case dto.MetricType_SUMMARY:
|
||||
if !strings.HasSuffix(newName, "_bucket") {
|
||||
return fmt.Errorf(
|
||||
"collected metric named %q collides with previously collected summary named %q",
|
||||
newName, newNameWithoutSuffix,
|
||||
)
|
||||
}
|
||||
|
||||
func (s metricSorter) Less(i, j int) bool {
|
||||
if len(s[i].Label) != len(s[j].Label) {
|
||||
// This should not happen. The metrics are
|
||||
// inconsistent. However, we have to deal with the fact, as
|
||||
// people might use custom collectors or metric family injection
|
||||
// to create inconsistent metrics. So let's simply compare the
|
||||
// number of labels in this case. That will still yield
|
||||
// reproducible sorting.
|
||||
return len(s[i].Label) < len(s[j].Label)
|
||||
}
|
||||
for n, lp := range s[i].Label {
|
||||
vi := lp.GetValue()
|
||||
vj := s[j].Label[n].GetValue()
|
||||
if vi != vj {
|
||||
return vi < vj
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
return fmt.Errorf(
|
||||
"collected metric named %q collides with previously collected histogram named %q",
|
||||
newName, newNameWithoutSuffix,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// We should never arrive here. Multiple metrics with the same
|
||||
// label set in the same scrape will lead to undefined ingestion
|
||||
// behavior. However, as above, we have to provide stable sorting
|
||||
// here, even for inconsistent metrics. So sort equal metrics
|
||||
// by their timestamp, with missing timestamps (implying "now")
|
||||
// coming last.
|
||||
if s[i].TimestampMs == nil {
|
||||
return false
|
||||
}
|
||||
if s[j].TimestampMs == nil {
|
||||
return true
|
||||
if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
|
||||
if _, ok := mfs[newName+"_count"]; ok {
|
||||
return fmt.Errorf(
|
||||
"collected histogram or summary named %q collides with previously collected metric named %q",
|
||||
newName, newName+"_count",
|
||||
)
|
||||
}
|
||||
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
||||
}
|
||||
|
||||
// normalizeMetricFamilies returns a MetricFamily slice whith empty
|
||||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
||||
// the slice, with the contained Metrics sorted within each MetricFamily.
|
||||
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
||||
for _, mf := range metricFamiliesByName {
|
||||
sort.Sort(metricSorter(mf.Metric))
|
||||
}
|
||||
names := make([]string, 0, len(metricFamiliesByName))
|
||||
for name, mf := range metricFamiliesByName {
|
||||
if len(mf.Metric) > 0 {
|
||||
names = append(names, name)
|
||||
if _, ok := mfs[newName+"_sum"]; ok {
|
||||
return fmt.Errorf(
|
||||
"collected histogram or summary named %q collides with previously collected metric named %q",
|
||||
newName, newName+"_sum",
|
||||
)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
result := make([]*dto.MetricFamily, 0, len(names))
|
||||
for _, name := range names {
|
||||
result = append(result, metricFamiliesByName[name])
|
||||
if newType == dto.MetricType_HISTOGRAM {
|
||||
if _, ok := mfs[newName+"_bucket"]; ok {
|
||||
return fmt.Errorf(
|
||||
"collected histogram named %q collides with previously collected metric named %q",
|
||||
newName, newName+"_bucket",
|
||||
)
|
||||
}
|
||||
return result
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkMetricConsistency checks if the provided Metric is consistent with the
|
||||
// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
|
||||
// name. If the resulting hash is alread in the provided metricHashes, an error
|
||||
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
|
||||
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
|
||||
// doesn't yet contain a hash for the provided MetricFamily, it is
|
||||
// added. Otherwise, an error is returned if the existing dimHashes in not equal
|
||||
// the calculated dimHash.
|
||||
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
|
||||
// name. If the resulting hash is already in the provided metricHashes, an error
|
||||
// is returned. If not, it is added to metricHashes.
|
||||
func checkMetricConsistency(
|
||||
metricFamily *dto.MetricFamily,
|
||||
dtoMetric *dto.Metric,
|
||||
metricHashes map[uint64]struct{},
|
||||
dimHashes map[string]uint64,
|
||||
) error {
|
||||
name := metricFamily.GetName()
|
||||
|
||||
// Type consistency with metric family.
|
||||
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
|
||||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
|
||||
|
@ -725,42 +841,67 @@ func checkMetricConsistency(
|
|||
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
|
||||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s is not a %s",
|
||||
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
|
||||
"collected metric %q { %s} is not a %s",
|
||||
name, dtoMetric, metricFamily.GetType(),
|
||||
)
|
||||
}
|
||||
|
||||
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
|
||||
h := hashNew()
|
||||
h = hashAdd(h, metricFamily.GetName())
|
||||
h = hashAddByte(h, separatorByte)
|
||||
dh := hashNew()
|
||||
previousLabelName := ""
|
||||
for _, labelPair := range dtoMetric.GetLabel() {
|
||||
labelName := labelPair.GetName()
|
||||
if labelName == previousLabelName {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} has two or more labels with the same name: %s",
|
||||
name, dtoMetric, labelName,
|
||||
)
|
||||
}
|
||||
if !checkLabelName(labelName) {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} has a label with an invalid name: %s",
|
||||
name, dtoMetric, labelName,
|
||||
)
|
||||
}
|
||||
if dtoMetric.Summary != nil && labelName == quantileLabel {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} must not have an explicit %q label",
|
||||
name, dtoMetric, quantileLabel,
|
||||
)
|
||||
}
|
||||
if !utf8.ValidString(labelPair.GetValue()) {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
|
||||
name, dtoMetric, labelName, labelPair.GetValue())
|
||||
}
|
||||
previousLabelName = labelName
|
||||
}
|
||||
|
||||
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
|
||||
h := xxhash.New()
|
||||
h.WriteString(name)
|
||||
h.Write(separatorByteSlice)
|
||||
// Make sure label pairs are sorted. We depend on it for the consistency
|
||||
// check.
|
||||
sort.Sort(LabelPairSorter(dtoMetric.Label))
|
||||
if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
|
||||
// We cannot sort dtoMetric.Label in place as it is immutable by contract.
|
||||
copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
|
||||
copy(copiedLabels, dtoMetric.Label)
|
||||
sort.Sort(labelPairSorter(copiedLabels))
|
||||
dtoMetric.Label = copiedLabels
|
||||
}
|
||||
for _, lp := range dtoMetric.Label {
|
||||
h = hashAdd(h, lp.GetValue())
|
||||
h = hashAddByte(h, separatorByte)
|
||||
dh = hashAdd(dh, lp.GetName())
|
||||
dh = hashAddByte(dh, separatorByte)
|
||||
h.WriteString(lp.GetName())
|
||||
h.Write(separatorByteSlice)
|
||||
h.WriteString(lp.GetValue())
|
||||
h.Write(separatorByteSlice)
|
||||
}
|
||||
if _, exists := metricHashes[h]; exists {
|
||||
hSum := h.Sum64()
|
||||
if _, exists := metricHashes[hSum]; exists {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s was collected before with the same name and label values",
|
||||
metricFamily.GetName(), dtoMetric,
|
||||
"collected metric %q { %s} was collected before with the same name and label values",
|
||||
name, dtoMetric,
|
||||
)
|
||||
}
|
||||
if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
|
||||
if dimHash != dh {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
|
||||
metricFamily.GetName(), dtoMetric,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
dimHashes[metricFamily.GetName()] = dh
|
||||
}
|
||||
metricHashes[h] = struct{}{}
|
||||
metricHashes[hSum] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -778,8 +919,8 @@ func checkDescConsistency(
|
|||
}
|
||||
|
||||
// Is the desc consistent with the content of the metric?
|
||||
lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
|
||||
lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
|
||||
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
|
||||
copy(lpsFromDesc, desc.constLabelPairs)
|
||||
for _, l := range desc.variableLabels {
|
||||
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
|
||||
Name: proto.String(l),
|
||||
|
@ -791,7 +932,7 @@ func checkDescConsistency(
|
|||
metricFamily.GetName(), dtoMetric, desc,
|
||||
)
|
||||
}
|
||||
sort.Sort(LabelPairSorter(lpsFromDesc))
|
||||
sort.Sort(labelPairSorter(lpsFromDesc))
|
||||
for i, lpFromDesc := range lpsFromDesc {
|
||||
lpFromMetric := dtoMetric.Label[i]
|
||||
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
|
||||
|
|
|
@ -16,8 +16,10 @@ package prometheus
|
|||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/beorn7/perks/quantile"
|
||||
|
@ -36,7 +38,10 @@ const quantileLabel = "quantile"
|
|||
//
|
||||
// A typical use-case is the observation of request latencies. By default, a
|
||||
// Summary provides the median, the 90th and the 99th percentile of the latency
|
||||
// as rank estimations.
|
||||
// as rank estimations. However, the default behavior will change in the
|
||||
// upcoming v1.0.0 of the library. There will be no rank estimations at all by
|
||||
// default. For a sane transition, it is recommended to set the desired rank
|
||||
// estimations explicitly.
|
||||
//
|
||||
// Note that the rank estimations cannot be aggregated in a meaningful way with
|
||||
// the Prometheus query language (i.e. you cannot average or add them). If you
|
||||
|
@ -53,14 +58,9 @@ type Summary interface {
|
|||
Observe(float64)
|
||||
}
|
||||
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
var (
|
||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
||||
|
||||
errQuantileLabelNotAllowed = fmt.Errorf(
|
||||
var errQuantileLabelNotAllowed = fmt.Errorf(
|
||||
"%q is not allowed as label name in summaries", quantileLabel,
|
||||
)
|
||||
)
|
||||
|
||||
// Default values for SummaryOpts.
|
||||
const (
|
||||
|
@ -75,8 +75,10 @@ const (
|
|||
)
|
||||
|
||||
// SummaryOpts bundles the options for creating a Summary metric. It is
|
||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
||||
// optional and can safely be left at their zero value.
|
||||
// mandatory to set Name to a non-empty string. While all other fields are
|
||||
// optional and can safely be left at their zero value, it is recommended to set
|
||||
// a help string and to explicitly set the Objectives field to the desired value
|
||||
// as the default value will change in the upcoming v1.0.0 of the library.
|
||||
type SummaryOpts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Summary (created by joining these components with
|
||||
|
@ -87,35 +89,34 @@ type SummaryOpts struct {
|
|||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this Summary. Mandatory!
|
||||
// Help provides information about this Summary.
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
Help string
|
||||
|
||||
// ConstLabels are used to attach fixed labels to this
|
||||
// Summary. Summaries with the same fully-qualified name must have the
|
||||
// same label names in their ConstLabels.
|
||||
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
||||
// with the same fully-qualified name must have the same label names in
|
||||
// their ConstLabels.
|
||||
//
|
||||
// Note that in most cases, labels have a value that varies during the
|
||||
// lifetime of a process. Those labels are usually managed with a
|
||||
// SummaryVec. ConstLabels serve only special purposes. One is for the
|
||||
// special case where the value of a label does not change during the
|
||||
// lifetime of a process, e.g. if the revision of the running binary is
|
||||
// put into a label. Another, more advanced purpose is if more than one
|
||||
// Collector needs to collect Summaries with the same fully-qualified
|
||||
// name. In that case, those Summaries must differ in the values of
|
||||
// their ConstLabels. See the Collector examples.
|
||||
// Due to the way a Summary is represented in the Prometheus text format
|
||||
// and how it is handled by the Prometheus server internally, “quantile”
|
||||
// is an illegal label name. Construction of a Summary or SummaryVec
|
||||
// will panic if this label name is used in ConstLabels.
|
||||
//
|
||||
// If the value of a label never changes (not even between binaries),
|
||||
// that label most likely should not be a label at all (but part of the
|
||||
// metric name).
|
||||
// ConstLabels are only used rarely. In particular, do not use them to
|
||||
// attach the same labels to all your metrics. Those use cases are
|
||||
// better covered by target labels set by the scraping Prometheus
|
||||
// server, or by one specific metric (e.g. a build_info or a
|
||||
// machine_role metric). See also
|
||||
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
|
||||
ConstLabels Labels
|
||||
|
||||
// Objectives defines the quantile rank estimates with their respective
|
||||
// absolute error. If Objectives[q] = e, then the value reported
|
||||
// for q will be the φ-quantile value for some φ between q-e and q+e.
|
||||
// The default value is DefObjectives.
|
||||
// absolute error. If Objectives[q] = e, then the value reported for q
|
||||
// will be the φ-quantile value for some φ between q-e and q+e. The
|
||||
// default value is an empty map, resulting in a summary without
|
||||
// quantiles.
|
||||
Objectives map[float64]float64
|
||||
|
||||
// MaxAge defines the duration for which an observation stays relevant
|
||||
|
@ -139,7 +140,7 @@ type SummaryOpts struct {
|
|||
BufCap uint32
|
||||
}
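Because the Objectives default is now an empty map (a summary without quantiles), callers that want rank estimations have to set them explicitly. A hedged sketch with an invented metric name and example objectives:

latency := prometheus.NewSummary(prometheus.SummaryOpts{
	Name: "request_duration_seconds",
	Help: "Request latency distribution.",
	// Formerly the DefObjectives default; now opt-in.
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
latency.Observe(0.42)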
|
||||
|
||||
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
|
||||
// Problem with the sliding-window decay algorithm... The Merge method of
|
||||
// perk/quantile is actually not working as advertised - and it might be
|
||||
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||
// summaries in the first place. To avoid using Merge, we are currently adding
|
||||
|
@ -169,7 +170,7 @@ func NewSummary(opts SummaryOpts) Summary {
|
|||
|
||||
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
panic(errInconsistentCardinality)
|
||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
|
||||
}
|
||||
|
||||
for _, n := range desc.variableLabels {
|
||||
|
@ -183,8 +184,8 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
|||
}
|
||||
}
|
||||
|
||||
if len(opts.Objectives) == 0 {
|
||||
opts.Objectives = DefObjectives
|
||||
if opts.Objectives == nil {
|
||||
opts.Objectives = map[float64]float64{}
|
||||
}
|
||||
|
||||
if opts.MaxAge < 0 {
|
||||
|
@ -202,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
|||
opts.BufCap = DefBufCap
|
||||
}
|
||||
|
||||
if len(opts.Objectives) == 0 {
|
||||
// Use the lock-free implementation of a Summary without objectives.
|
||||
s := &noObjectivesSummary{
|
||||
desc: desc,
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
counts: [2]*summaryCounts{{}, {}},
|
||||
}
|
||||
s.init(s) // Init self-collection.
|
||||
return s
|
||||
}
|
||||
|
||||
s := &summary{
|
||||
desc: desc,
|
||||
|
||||
|
@ -370,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) {
|
|||
}
|
||||
}
|
||||
|
||||
type summaryCounts struct {
|
||||
// sumBits contains the bits of the float64 representing the sum of all
|
||||
// observations. sumBits and count have to go first in the struct to
|
||||
// guarantee alignment for atomic operations.
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
sumBits uint64
|
||||
count uint64
|
||||
}
|
||||
|
||||
type noObjectivesSummary struct {
|
||||
// countAndHotIdx enables lock-free writes with use of atomic updates.
|
||||
// The most significant bit is the hot index [0 or 1] of the count field
|
||||
// below. Observe calls update the hot one. All remaining bits count the
|
||||
// number of Observe calls. Observe starts by incrementing this counter,
|
||||
// and finish by incrementing the count field in the respective
|
||||
// summaryCounts, as a marker for completion.
|
||||
//
|
||||
// Calls of the Write method (which are non-mutating reads from the
|
||||
// perspective of the summary) swap the hot–cold under the writeMtx
|
||||
// lock. A cooldown is awaited (while locked) by comparing the number of
|
||||
// observations with the initiation count. Once they match, then the
|
||||
// last observation on the now cool one has completed. All cool fields must
|
||||
// be merged into the new hot before releasing writeMtx.
|
||||
|
||||
// Fields with atomic access first! See alignment constraint:
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
countAndHotIdx uint64
|
||||
|
||||
selfCollector
|
||||
desc *Desc
|
||||
writeMtx sync.Mutex // Only used in the Write method.
|
||||
|
||||
// Two counts, one is "hot" for lock-free observations, the other is
|
||||
// "cold" for writing out a dto.Metric. It has to be an array of
|
||||
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
||||
counts [2]*summaryCounts
|
||||
|
||||
labelPairs []*dto.LabelPair
|
||||
}
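A minimal standalone sketch of the bit packing the comment above describes: the top bit of countAndHotIdx selects the hot counts, the remaining 63 bits count Observe calls. The helper name is made up and sync/atomic is assumed.

func splitCountAndHotIdx(countAndHotIdx *uint64) (hotIdx, count uint64) {
	n := atomic.LoadUint64(countAndHotIdx)
	return n >> 63, n & ((1 << 63) - 1)
}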
|
||||
|
||||
func (s *noObjectivesSummary) Desc() *Desc {
|
||||
return s.desc
|
||||
}
|
||||
|
||||
func (s *noObjectivesSummary) Observe(v float64) {
|
||||
// We increment h.countAndHotIdx so that the counter in the lower
|
||||
// 63 bits gets incremented. At the same time, we get the new value
|
||||
// back, which we can use to find the currently-hot counts.
|
||||
n := atomic.AddUint64(&s.countAndHotIdx, 1)
|
||||
hotCounts := s.counts[n>>63]
|
||||
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Increment count last as we take it as a signal that the observation
|
||||
// is complete.
|
||||
atomic.AddUint64(&hotCounts.count, 1)
|
||||
}
|
||||
|
||||
func (s *noObjectivesSummary) Write(out *dto.Metric) error {
|
||||
// For simplicity, we protect this whole method by a mutex. It is not in
|
||||
// the hot path, i.e. Observe is called much more often than Write. The
|
||||
// complication of making Write lock-free isn't worth it, if possible at
|
||||
// all.
|
||||
s.writeMtx.Lock()
|
||||
defer s.writeMtx.Unlock()
|
||||
|
||||
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
|
||||
// without touching the count bits. See the struct comments for a full
|
||||
// description of the algorithm.
|
||||
n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
|
||||
// count is contained unchanged in the lower 63 bits.
|
||||
count := n & ((1 << 63) - 1)
|
||||
// The most significant bit tells us which counts is hot. The complement
|
||||
// is thus the cold one.
|
||||
hotCounts := s.counts[n>>63]
|
||||
coldCounts := s.counts[(^n)>>63]
|
||||
|
||||
// Await cooldown.
|
||||
for count != atomic.LoadUint64(&coldCounts.count) {
|
||||
runtime.Gosched() // Let observations get work done.
|
||||
}
|
||||
|
||||
sum := &dto.Summary{
|
||||
SampleCount: proto.Uint64(count),
|
||||
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
|
||||
}
|
||||
|
||||
out.Summary = sum
|
||||
out.Label = s.labelPairs
|
||||
|
||||
// Finally add all the cold counts to the new hot counts and reset the cold counts.
|
||||
atomic.AddUint64(&hotCounts.count, count)
|
||||
atomic.StoreUint64(&coldCounts.count, 0)
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type quantSort []*dto.Quantile
|
||||
|
||||
func (s quantSort) Len() int {
|
||||
|
@ -390,13 +512,21 @@ func (s quantSort) Less(i, j int) bool {
|
|||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewSummaryVec.
|
||||
type SummaryVec struct {
|
||||
*MetricVec
|
||||
*metricVec
|
||||
}
|
||||
|
||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
// partitioned by the given label names.
|
||||
//
|
||||
// Due to the way a Summary is represented in the Prometheus text format and how
|
||||
// it is handled by the Prometheus server internally, “quantile” is an illegal
|
||||
// label name. NewSummaryVec will panic if this label name is used.
|
||||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||
for _, ln := range labelNames {
|
||||
if ln == quantileLabel {
|
||||
panic(errQuantileLabelNotAllowed)
|
||||
}
|
||||
}
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
|
@ -404,47 +534,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &SummaryVec{
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newSummary(desc, opts, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Summary and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
// GetMetricWithLabelValues returns the Summary for the given slice of label
|
||||
// values (same order as the VariableLabels in Desc). If that combination of
|
||||
// label values is accessed for the first time, a new Summary is created.
|
||||
//
|
||||
// It is possible to call this method without using the returned Summary to only
|
||||
// create the new Summary but leave it at its starting value, a Summary without
|
||||
// any observations.
|
||||
//
|
||||
// Keeping the Summary for later use is possible (and should be considered if
|
||||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||
// Delete can be used to delete the Summary from the SummaryVec. In that case,
|
||||
// the Summary will still exist, but it will not be exported anymore, even if a
|
||||
// Summary with the same label values is created later. See also the CounterVec
|
||||
// example.
|
||||
//
|
||||
// An error is returned if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
|
||||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Summary), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Summary and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
// GetMetricWith returns the Summary for the given Labels map (the label names
|
||||
// must match those of the VariableLabels in Desc). If that label map is
|
||||
// accessed for the first time, a new Summary is created. Implications of
|
||||
// creating a Summary without using it and keeping the Summary for later use are
|
||||
// the same as for GetMetricWithLabelValues.
|
||||
//
|
||||
// An error is returned if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in Desc (minus any curried labels).
|
||||
//
|
||||
// This method is used for the same purpose as
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
|
||||
metric, err := v.metricVec.getMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Summary), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||
// error allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
||||
func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Summary)
|
||||
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
|
||||
s, err := v.GetMetricWithLabelValues(lvs...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (m *SummaryVec) With(labels Labels) Summary {
|
||||
return m.MetricVec.With(labels).(Summary)
|
||||
// returned an error. Not returning an error allows shortcuts like
|
||||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (v *SummaryVec) With(labels Labels) Observer {
|
||||
s, err := v.GetMetricWith(labels)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence – which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the SummaryVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
	vec, err := v.curryWith(labels)
	if vec != nil {
		return &SummaryVec{vec}, err
	}
	return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
	vec, err := v.CurryWith(labels)
	if err != nil {
		panic(err)
	}
	return vec
}
|
||||
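A small currying sketch under the same assumptions (invented metric and label names): one label is pre-set once, callers only supply the rest, and the uncurried vector remains the one that gets registered.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{Name: "request_duration_seconds", Help: "Request latency."},
		[]string{"service", "method"},
	)
	// Curry the "service" label once; the returned ObserverVec only needs "method".
	checkout := latency.MustCurryWith(prometheus.Labels{"service": "checkout"})
	checkout.WithLabelValues("GET").Observe(0.042)
	// Register the uncurried vector; curried views share its metrics.
	prometheus.MustRegister(latency)
}
```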
|
||||
type constSummary struct {
|
||||
|
@ -497,7 +696,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
|
|||
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
|
||||
//
|
||||
// NewConstSummary returns an error if the length of labelValues is not
|
||||
// consistent with the variable labels in Desc.
|
||||
// consistent with the variable labels in Desc or if Desc is invalid.
|
||||
func NewConstSummary(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
|
@ -505,8 +704,11 @@ func NewConstSummary(
|
|||
quantiles map[float64]float64,
|
||||
labelValues ...string,
|
||||
) (Metric, error) {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
return nil, errInconsistentCardinality
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &constSummary{
|
||||
desc: desc,
|
||||
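A sketch of how a const summary is typically emitted from a custom Collector's Collect method. It assumes MustNewConstSummary from the same package; the descriptor, counts, and quantiles are invented.

```go
package example

import "github.com/prometheus/client_golang/prometheus"

var tempDesc = prometheus.NewDesc(
	"temperature_celsius_summary",
	"Temperature summary scraped from an external system.",
	[]string{"sensor"}, nil,
)

// collectInto emits one throw-away summary built from already-aggregated data.
// MustNewConstSummary panics on the same errors NewConstSummary would return.
func collectInto(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstSummary(
		tempDesc,
		4711,   // count
		403.34, // sum
		map[float64]float64{0.5: 31.1, 0.99: 41.3}, // quantiles
		"outdoor", // value for the "sensor" label
	)
}
```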
|
|
|
@ -0,0 +1,54 @@
|
|||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import "time"
|
||||
|
||||
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
	begin    time.Time
	observer Observer
}

// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the
// following way:
//    func TimeMe() {
//        timer := NewTimer(myHistogram)
//        defer timer.ObserveDuration()
//        // Do actual work.
//    }
func NewTimer(o Observer) *Timer {
	return &Timer{
		begin:    time.Now(),
		observer: o,
	}
}

// ObserveDuration records the duration passed since the Timer was created with
// NewTimer. It calls the Observe method of the Observer provided during
// construction with the duration in seconds as an argument. The observed
// duration is also returned. ObserveDuration is usually called with a defer
// statement.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func (t *Timer) ObserveDuration() time.Duration {
	d := time.Since(t.begin)
	if t.observer != nil {
		t.observer.Observe(d.Seconds())
	}
	return d
}
|
|
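Beyond the histogram pattern shown in the NewTimer comment, a Timer can feed any Observer. One common variant, sketched here under the assumption that the package's ObserverFunc adapter is available, records the duration of the last run in a Gauge:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var lastRunDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "job_last_run_duration_seconds",
	Help: "Duration of the last job run.",
})

func runJob() {
	// ObserverFunc adapts Gauge.Set into an Observer, so ObserveDuration
	// writes the elapsed seconds into the gauge when the function returns.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRunDuration.Set))
	defer timer.ObserveDuration()

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(lastRunDuration)
	runJob()
}
```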
@ -13,108 +13,12 @@
|
|||
|
||||
package prometheus
|
||||
|
||||
// Untyped is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
// An Untyped metric works the same as a Gauge. The only difference is that
|
||||
// no type information is implied.
|
||||
//
|
||||
// To create Untyped instances, use NewUntyped.
|
||||
type Untyped interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Set sets the Untyped metric to an arbitrary value.
|
||||
Set(float64)
|
||||
// Inc increments the Untyped metric by 1.
|
||||
Inc()
|
||||
// Dec decrements the Untyped metric by 1.
|
||||
Dec()
|
||||
// Add adds the given value to the Untyped metric. (The value can be
|
||||
// negative, resulting in a decrease.)
|
||||
Add(float64)
|
||||
// Sub subtracts the given value from the Untyped metric. (The value can
|
||||
// be negative, resulting in an increase.)
|
||||
Sub(float64)
|
||||
}
|
||||
|
||||
// UntypedOpts is an alias for Opts. See there for doc comments.
|
||||
type UntypedOpts Opts
|
||||
|
||||
// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
|
||||
func NewUntyped(opts UntypedOpts) Untyped {
|
||||
return newValue(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), UntypedValue, 0)
|
||||
}
|
||||
|
||||
// UntypedVec is a Collector that bundles a set of Untyped metrics that all
|
||||
// share the same Desc, but have different values for their variable
|
||||
// labels. This is used if you want to count the same thing partitioned by
|
||||
// various dimensions. Create instances with NewUntypedVec.
|
||||
type UntypedVec struct {
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
labelNames,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &UntypedVec{
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, UntypedValue, 0, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns an Untyped and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Untyped), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns an Untyped and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Untyped), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Untyped)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWith would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (m *UntypedVec) With(labels Labels) Untyped {
|
||||
return m.MetricVec.With(labels).(Untyped)
|
||||
}
|
||||
|
||||
// UntypedFunc is an Untyped whose value is determined at collect time by
|
||||
// calling a provided function.
|
||||
// UntypedFunc works like GaugeFunc but the collected metric is of type
|
||||
// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
|
||||
// type.
|
||||
//
|
||||
// To create UntypedFunc instances, use NewUntypedFunc.
|
||||
type UntypedFunc interface {
|
||||
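A hedged sketch of the UntypedFunc described above, mirroring a value owned by some external system (the variable and metric names are invented):

```go
package example

import "github.com/prometheus/client_golang/prometheus"

// externalQueueDepth stands in for a value owned by some other system.
var externalQueueDepth float64

// queueDepth reads the current value on every scrape without implying
// whether it behaves like a counter or a gauge.
var queueDepth = prometheus.NewUntypedFunc(
	prometheus.UntypedOpts{Name: "external_queue_depth", Help: "Depth reported by an external queue."},
	func() float64 { return externalQueueDepth },
)
```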
|
|
|
@ -14,21 +14,22 @@
|
|||
package prometheus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// ValueType is an enumeration of metric types that represent a simple value.
|
||||
type ValueType int
|
||||
|
||||
// Possible values for the ValueType enum.
|
||||
// Possible values for the ValueType enum. Use UntypedValue to mark a metric
|
||||
// with an unknown type.
|
||||
const (
|
||||
_ ValueType = iota
|
||||
CounterValue
|
||||
|
@ -36,77 +37,6 @@ const (
|
|||
UntypedValue
|
||||
)
|
||||
|
||||
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|
||||
|
||||
// value is a generic metric for simple values. It implements Metric, Collector,
|
||||
// Counter, Gauge, and Untyped. Its effective type is determined by
|
||||
// ValueType. This is a low-level building block used by the library to back the
|
||||
// implementations of Counter, Gauge, and Untyped.
|
||||
type value struct {
|
||||
// valBits contains the bits of the represented float64 value. It has
|
||||
// to go first in the struct to guarantee alignment for atomic
|
||||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
valBits uint64
|
||||
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
// newValue returns a newly allocated value with the given Desc, ValueType,
|
||||
// sample value and label values. It panics if the number of label
|
||||
// values is different from the number of variable labels in Desc.
|
||||
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
|
||||
if len(labelValues) != len(desc.variableLabels) {
|
||||
panic(errInconsistentCardinality)
|
||||
}
|
||||
result := &value{
|
||||
desc: desc,
|
||||
valType: valueType,
|
||||
valBits: math.Float64bits(val),
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
func (v *value) Desc() *Desc {
|
||||
return v.desc
|
||||
}
|
||||
|
||||
func (v *value) Set(val float64) {
|
||||
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
|
||||
}
|
||||
|
||||
func (v *value) Inc() {
|
||||
v.Add(1)
|
||||
}
|
||||
|
||||
func (v *value) Dec() {
|
||||
v.Add(-1)
|
||||
}
|
||||
|
||||
func (v *value) Add(val float64) {
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&v.valBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
|
||||
if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (v *value) Sub(val float64) {
|
||||
v.Add(val * -1)
|
||||
}
|
||||
|
||||
func (v *value) Write(out *dto.Metric) error {
|
||||
val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
|
||||
return populateMetric(v.valType, val, v.labelPairs, out)
|
||||
}
|
||||
|
||||
// valueFunc is a generic metric for simple values retrieved on collect time
|
||||
// from a function. It implements Metric and Collector. Its effective type is
|
||||
// determined by ValueType. This is a low-level building block used by the
|
||||
|
@ -143,7 +73,7 @@ func (v *valueFunc) Desc() *Desc {
|
|||
}
|
||||
|
||||
func (v *valueFunc) Write(out *dto.Metric) error {
|
||||
return populateMetric(v.valType, v.function(), v.labelPairs, out)
|
||||
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
|
||||
}
|
||||
|
||||
// NewConstMetric returns a metric with one fixed value that cannot be
|
||||
|
@ -151,10 +81,14 @@ func (v *valueFunc) Write(out *dto.Metric) error {
|
|||
// operations. However, when implementing custom Collectors, it is useful as a
|
||||
// throw-away metric that is generated on the fly to send it to Prometheus in
|
||||
// the Collect method. NewConstMetric returns an error if the length of
|
||||
// labelValues is not consistent with the variable labels in Desc.
|
||||
// labelValues is not consistent with the variable labels in Desc or if Desc is
|
||||
// invalid.
|
||||
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
return nil, errInconsistentCardinality
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &constMetric{
|
||||
desc: desc,
|
||||
|
@ -186,19 +120,20 @@ func (m *constMetric) Desc() *Desc {
|
|||
}
|
||||
|
||||
func (m *constMetric) Write(out *dto.Metric) error {
|
||||
return populateMetric(m.valType, m.val, m.labelPairs, out)
|
||||
return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
|
||||
}
|
||||
|
||||
func populateMetric(
|
||||
t ValueType,
|
||||
v float64,
|
||||
labelPairs []*dto.LabelPair,
|
||||
e *dto.Exemplar,
|
||||
m *dto.Metric,
|
||||
) error {
|
||||
m.Label = labelPairs
|
||||
switch t {
|
||||
case CounterValue:
|
||||
m.Counter = &dto.Counter{Value: proto.Float64(v)}
|
||||
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
|
||||
case GaugeValue:
|
||||
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
|
||||
case UntypedValue:
|
||||
|
@ -226,9 +161,44 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
|
|||
Value: proto.String(labelValues[i]),
|
||||
})
|
||||
}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
labelPairs = append(labelPairs, lp)
|
||||
}
|
||||
sort.Sort(LabelPairSorter(labelPairs))
|
||||
labelPairs = append(labelPairs, desc.constLabelPairs...)
|
||||
sort.Sort(labelPairSorter(labelPairs))
|
||||
return labelPairs
|
||||
}
|
||||
|
||||
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
const ExemplarMaxRunes = 64

// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
// number of runes in the label names and values exceeds ExemplarMaxRunes.
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
	e := &dto.Exemplar{}
	e.Value = proto.Float64(value)
	tsProto, err := ptypes.TimestampProto(ts)
	if err != nil {
		return nil, err
	}
	e.Timestamp = tsProto
	labelPairs := make([]*dto.LabelPair, 0, len(l))
	var runes int
	for name, value := range l {
		if !checkLabelName(name) {
			return nil, fmt.Errorf("exemplar label name %q is invalid", name)
		}
		runes += utf8.RuneCountInString(name)
		if !utf8.ValidString(value) {
			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
		}
		runes += utf8.RuneCountInString(value)
		labelPairs = append(labelPairs, &dto.LabelPair{
			Name:  proto.String(name),
			Value: proto.String(value),
		})
	}
	if runes > ExemplarMaxRunes {
		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
	}
	e.Label = labelPairs
	return e, nil
}
|
||||
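newExemplar itself is unexported. In the public API, exemplars are attached through the ExemplarAdder and ExemplarObserver interfaces; the sketch below assumes ExemplarAdder exists in this version of the package, and the trace-ID label is purely illustrative:

```go
package example

import "github.com/prometheus/client_golang/prometheus"

var requests = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "http_requests_total",
	Help: "Total HTTP requests.",
})

func countWithExemplar(traceID string) {
	// Counters created by this package may also implement ExemplarAdder.
	if ea, ok := requests.(prometheus.ExemplarAdder); ok {
		ea.AddWithExemplar(1, prometheus.Labels{"trace_id": traceID})
		return
	}
	requests.Inc()
}
```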
|
|
|
@ -20,33 +20,192 @@ import (
|
|||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// MetricVec is a Collector to bundle metrics of the same name that
|
||||
// differ in their label values. MetricVec is usually not used directly but as a
|
||||
// building block for implementations of vectors of a given metric
|
||||
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
|
||||
// provided in this package.
|
||||
type MetricVec struct {
|
||||
mtx sync.RWMutex // Protects the children.
|
||||
children map[uint64][]metricWithLabelValues
|
||||
desc *Desc
|
||||
// metricVec is a Collector to bundle metrics of the same name that differ in
|
||||
// their label values. metricVec is not used directly (and therefore
|
||||
// unexported). It is used as a building block for implementations of vectors of
|
||||
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
|
||||
// It also handles label currying.
|
||||
type metricVec struct {
|
||||
*metricMap
|
||||
|
||||
newMetric func(labelValues ...string) Metric
|
||||
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
|
||||
curry []curriedLabelValue
|
||||
|
||||
// hashAdd and hashAddByte can be replaced for testing collision handling.
|
||||
hashAdd func(h uint64, s string) uint64
|
||||
hashAddByte func(h uint64, b byte) uint64
|
||||
}
|
||||
|
||||
// newMetricVec returns an initialized MetricVec. The concrete value is
|
||||
// returned for embedding into another struct.
|
||||
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
|
||||
return &MetricVec{
|
||||
children: map[uint64][]metricWithLabelValues{},
|
||||
// newMetricVec returns an initialized metricVec.
|
||||
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
|
||||
return &metricVec{
|
||||
metricMap: &metricMap{
|
||||
metrics: map[uint64][]metricWithLabelValues{},
|
||||
desc: desc,
|
||||
newMetric: newMetric,
|
||||
},
|
||||
hashAdd: hashAdd,
|
||||
hashAddByte: hashAddByte,
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteLabelValues removes the metric where the variable labels are the same
// as those passed in as labels (same order as the VariableLabels in Desc). It
// returns true if a metric was deleted.
//
// It is not an error if the number of label values is not the same as the
// number of VariableLabels in Desc. However, such inconsistent label count can
// never match an actual metric, so the method will always return false in that
// case.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
// alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
	h, err := m.hashLabelValues(lvs)
	if err != nil {
		return false
	}

	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
}

// Delete deletes the metric where the variable labels are the same as those
// passed in as labels. It returns true if a metric was deleted.
//
// It is not an error if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc. However, such inconsistent Labels
// can never match an actual metric, so the method will always return false in
// that case.
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *metricVec) Delete(labels Labels) bool {
	h, err := m.hashLabels(labels)
	if err != nil {
		return false
	}

	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}
|
||||
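The same trade-off carries over to the exported vector types that embed this machinery. A sketch with a CounterVec (invented names):

```go
package example

import "github.com/prometheus/client_golang/prometheus"

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
	[]string{"code", "method"},
)

func forgetChild() {
	// Positional form: argument order must match the label names, or nothing matches.
	_ = httpReqs.DeleteLabelValues("404", "GET")
	// Map form: more verbose and a bit slower, but order-independent.
	_ = httpReqs.Delete(prometheus.Labels{"code": "404", "method": "GET"})
}
```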
|
||||
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
|
||||
// show up in GoDoc.
|
||||
|
||||
// Describe implements Collector.
|
||||
func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
|
||||
|
||||
// Collect implements Collector.
|
||||
func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
|
||||
|
||||
// Reset deletes all metrics in this vector.
|
||||
func (m *metricVec) Reset() { m.metricMap.Reset() }
|
||||
|
||||
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
|
||||
var (
|
||||
newCurry []curriedLabelValue
|
||||
oldCurry = m.curry
|
||||
iCurry int
|
||||
)
|
||||
for i, label := range m.desc.variableLabels {
|
||||
val, ok := labels[label]
|
||||
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
|
||||
if ok {
|
||||
return nil, fmt.Errorf("label name %q is already curried", label)
|
||||
}
|
||||
newCurry = append(newCurry, oldCurry[iCurry])
|
||||
iCurry++
|
||||
} else {
|
||||
if !ok {
|
||||
continue // Label stays uncurried.
|
||||
}
|
||||
newCurry = append(newCurry, curriedLabelValue{i, val})
|
||||
}
|
||||
}
|
||||
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
|
||||
return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
|
||||
}
|
||||
|
||||
return &metricVec{
|
||||
metricMap: m.metricMap,
|
||||
curry: newCurry,
|
||||
hashAdd: m.hashAdd,
|
||||
hashAddByte: m.hashAddByte,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
|
||||
}
|
||||
|
||||
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
|
||||
}
|
||||
|
||||
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
|
||||
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var (
|
||||
h = hashNew()
|
||||
curry = m.curry
|
||||
iVals, iCurry int
|
||||
)
|
||||
for i := 0; i < len(m.desc.variableLabels); i++ {
|
||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
||||
h = m.hashAdd(h, curry[iCurry].value)
|
||||
iCurry++
|
||||
} else {
|
||||
h = m.hashAdd(h, vals[iVals])
|
||||
iVals++
|
||||
}
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
|
||||
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var (
|
||||
h = hashNew()
|
||||
curry = m.curry
|
||||
iCurry int
|
||||
)
|
||||
for i, label := range m.desc.variableLabels {
|
||||
val, ok := labels[label]
|
||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
||||
if ok {
|
||||
return 0, fmt.Errorf("label name %q is already curried", label)
|
||||
}
|
||||
h = m.hashAdd(h, curry[iCurry].value)
|
||||
iCurry++
|
||||
} else {
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||
}
|
||||
h = m.hashAdd(h, val)
|
||||
}
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// metricWithLabelValues provides the metric and its label values for
|
||||
// disambiguation on hash collision.
|
||||
type metricWithLabelValues struct {
|
||||
|
@ -54,166 +213,72 @@ type metricWithLabelValues struct {
|
|||
metric Metric
|
||||
}
|
||||
|
||||
// Describe implements Collector. The length of the returned slice
|
||||
// is always one.
|
||||
func (m *MetricVec) Describe(ch chan<- *Desc) {
|
||||
// curriedLabelValue sets the curried value for a label at the given index.
|
||||
type curriedLabelValue struct {
|
||||
index int
|
||||
value string
|
||||
}
|
||||
|
||||
// metricMap is a helper for metricVec and shared between differently curried
|
||||
// metricVecs.
|
||||
type metricMap struct {
|
||||
mtx sync.RWMutex // Protects metrics.
|
||||
metrics map[uint64][]metricWithLabelValues
|
||||
desc *Desc
|
||||
newMetric func(labelValues ...string) Metric
|
||||
}
|
||||
|
||||
// Describe implements Collector. It will send exactly one Desc to the provided
|
||||
// channel.
|
||||
func (m *metricMap) Describe(ch chan<- *Desc) {
|
||||
ch <- m.desc
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (m *MetricVec) Collect(ch chan<- Metric) {
|
||||
func (m *metricMap) Collect(ch chan<- Metric) {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
|
||||
for _, metrics := range m.children {
|
||||
for _, metrics := range m.metrics {
|
||||
for _, metric := range metrics {
|
||||
ch <- metric.metric
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues returns the Metric for the given slice of label
|
||||
// values (same order as the VariableLabels in Desc). If that combination of
|
||||
// label values is accessed for the first time, a new Metric is created.
|
||||
//
|
||||
// It is possible to call this method without using the returned Metric to only
|
||||
// create the new Metric but leave it at its start value (e.g. a Summary or
|
||||
// Histogram without any observations). See also the SummaryVec example.
|
||||
//
|
||||
// Keeping the Metric for later use is possible (and should be considered if
|
||||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||
// Delete can be used to delete the Metric from the MetricVec. In that case, the
|
||||
// Metric will still exist, but it will not be exported anymore, even if a
|
||||
// Metric with the same label values is created later. See also the CounterVec
|
||||
// example.
|
||||
//
|
||||
// An error is returned if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc.
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
|
||||
}
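One reason to call this without using the returned Metric, as the comment above notes, is to pre-create children so they are exported at their start value before the first event. A sketch with a CounterVec (invented names):

```go
package example

import "github.com/prometheus/client_golang/prometheus"

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
	[]string{"code"},
)

func init() {
	// Touch the well-known children once so they are exported as 0 right away.
	for _, code := range []string{"200", "404", "500"} {
		if _, err := httpReqs.GetMetricWithLabelValues(code); err != nil {
			panic(err)
		}
	}
}
```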
|
||||
|
||||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
||||
// must match those of the VariableLabels in Desc). If that label map is
|
||||
// accessed for the first time, a new Metric is created. Implications of
|
||||
// creating a Metric without using it and keeping the Metric for later use are
|
||||
// the same as for GetMetricWithLabelValues.
|
||||
//
|
||||
// An error is returned if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in Desc.
|
||||
//
|
||||
// This method is used for the same purpose as
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.getOrCreateMetricWithLabels(h, labels), nil
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
||||
// occurs. The method allows neat syntax like:
|
||||
// httpReqs.WithLabelValues("404", "POST").Inc()
|
||||
func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
|
||||
metric, err := m.GetMetricWithLabelValues(lvs...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics if an error occurs. The method allows
|
||||
// neat syntax like:
|
||||
// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
|
||||
func (m *MetricVec) With(labels Labels) Metric {
|
||||
metric, err := m.GetMetricWith(labels)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// DeleteLabelValues removes the metric where the variable labels are the same
|
||||
// as those passed in as labels (same order as the VariableLabels in Desc). It
|
||||
// returns true if a metric was deleted.
|
||||
//
|
||||
// It is not an error if the number of label values is not the same as the
|
||||
// number of VariableLabels in Desc. However, such inconsistent label count can
|
||||
// never match an actual Metric, so the method will always return false in that
|
||||
// case.
|
||||
//
|
||||
// Note that for more than one label value, this method is prone to mistakes
|
||||
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
|
||||
// alternative to avoid that type of mistake. For higher label numbers, the
|
||||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the CounterVec example.
|
||||
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
||||
// Reset deletes all metrics in this vector.
|
||||
func (m *metricMap) Reset() {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return false
|
||||
for h := range m.metrics {
|
||||
delete(m.metrics, h)
|
||||
}
|
||||
return m.deleteByHashWithLabelValues(h, lvs)
|
||||
}
|
||||
|
||||
// Delete deletes the metric where the variable labels are the same as those
|
||||
// passed in as labels. It returns true if a metric was deleted.
|
||||
//
|
||||
// It is not an error if the number and names of the Labels are inconsistent
|
||||
// with those of the VariableLabels in the Desc of the MetricVec. However, such
|
||||
// inconsistent Labels can never match an actual Metric, so the method will
|
||||
// always return false in that case.
|
||||
//
|
||||
// This method is used for the same purpose as DeleteLabelValues(...string). See
|
||||
// there for pros and cons of the two methods.
|
||||
func (m *MetricVec) Delete(labels Labels) bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return m.deleteByHashWithLabels(h, labels)
|
||||
}
|
||||
|
||||
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
|
||||
// there are multiple matches in the bucket, use lvs to select a metric and
|
||||
// remove only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
|
||||
metrics, ok := m.children[h]
|
||||
func (m *metricMap) deleteByHashWithLabelValues(
|
||||
h uint64, lvs []string, curry []curriedLabelValue,
|
||||
) bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
metrics, ok := m.metrics[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
i := m.findMetricWithLabelValues(metrics, lvs)
|
||||
i := findMetricWithLabelValues(metrics, lvs, curry)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
delete(m.metrics, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
@ -221,69 +286,38 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
|
|||
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
|
||||
// are multiple matches in the bucket, use lvs to select a metric and remove
|
||||
// only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
|
||||
metrics, ok := m.children[h]
|
||||
func (m *metricMap) deleteByHashWithLabels(
|
||||
h uint64, labels Labels, curry []curriedLabelValue,
|
||||
) bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
metrics, ok := m.metrics[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
i := m.findMetricWithLabels(metrics, labels)
|
||||
i := findMetricWithLabels(m.desc, metrics, labels, curry)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
delete(m.metrics, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Reset deletes all metrics in this vector.
|
||||
func (m *MetricVec) Reset() {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
for h := range m.children {
|
||||
delete(m.children, h)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
|
||||
if len(vals) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
h := hashNew()
|
||||
for _, val := range vals {
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
|
||||
if len(labels) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
h := hashNew()
|
||||
for _, label := range m.desc.variableLabels {
|
||||
val, ok := labels[label]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||
}
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
|
||||
func (m *metricMap) getOrCreateMetricWithLabelValues(
|
||||
hash uint64, lvs []string, curry []curriedLabelValue,
|
||||
) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabelValues(hash, lvs)
|
||||
metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
|
@ -291,13 +325,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
|
|||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabelValues(hash, lvs)
|
||||
metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
|
||||
if !ok {
|
||||
// Copy to avoid allocation in case we don't go down this code path.
|
||||
copiedLVs := make([]string, len(lvs))
|
||||
copy(copiedLVs, lvs)
|
||||
metric = m.newMetric(copiedLVs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
|
||||
inlinedLVs := inlineLabelValues(lvs, curry)
|
||||
metric = m.newMetric(inlinedLVs...)
|
||||
m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
@ -306,9 +338,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
|
|||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
|
||||
func (m *metricMap) getOrCreateMetricWithLabels(
|
||||
hash uint64, labels Labels, curry []curriedLabelValue,
|
||||
) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabels(hash, labels)
|
||||
metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
|
@ -316,33 +350,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr
|
|||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabels(hash, labels)
|
||||
metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
|
||||
if !ok {
|
||||
lvs := m.extractLabelValues(labels)
|
||||
lvs := extractLabelValues(m.desc, labels, curry)
|
||||
metric = m.newMetric(lvs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
|
||||
m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// getMetricWithLabelValues gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
// getMetricWithHashAndLabelValues gets a metric while handling possible
|
||||
// collisions in the hash space. Must be called while holding the read mutex.
|
||||
func (m *metricMap) getMetricWithHashAndLabelValues(
|
||||
h uint64, lvs []string, curry []curriedLabelValue,
|
||||
) (Metric, bool) {
|
||||
metrics, ok := m.metrics[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
|
||||
if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// getMetricWithLabels gets a metric while handling possible collisions in
|
||||
// getMetricWithHashAndLabels gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
func (m *metricMap) getMetricWithHashAndLabels(
|
||||
h uint64, labels Labels, curry []curriedLabelValue,
|
||||
) (Metric, bool) {
|
||||
metrics, ok := m.metrics[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
|
||||
if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
|
@ -351,9 +389,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool)
|
|||
|
||||
// findMetricWithLabelValues returns the index of the matching metric or
|
||||
// len(metrics) if not found.
|
||||
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
|
||||
func findMetricWithLabelValues(
|
||||
metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
|
||||
) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabelValues(metric.values, lvs) {
|
||||
if matchLabelValues(metric.values, lvs, curry) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
@ -362,32 +402,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l
|
|||
|
||||
// findMetricWithLabels returns the index of the matching metric or len(metrics)
|
||||
// if not found.
|
||||
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
|
||||
func findMetricWithLabels(
|
||||
desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
|
||||
) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabels(metric.values, labels) {
|
||||
if matchLabels(desc, metric.values, labels, curry) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(metrics)
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
|
||||
if len(values) != len(lvs) {
|
||||
func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
|
||||
if len(values) != len(lvs)+len(curry) {
|
||||
return false
|
||||
}
|
||||
var iLVs, iCurry int
|
||||
for i, v := range values {
|
||||
if v != lvs[i] {
|
||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
||||
if v != curry[iCurry].value {
|
||||
return false
|
||||
}
|
||||
iCurry++
|
||||
continue
|
||||
}
|
||||
if v != lvs[iLVs] {
|
||||
return false
|
||||
}
|
||||
iLVs++
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
|
||||
if len(labels) != len(values) {
|
||||
func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
|
||||
if len(values) != len(labels)+len(curry) {
|
||||
return false
|
||||
}
|
||||
for i, k := range m.desc.variableLabels {
|
||||
iCurry := 0
|
||||
for i, k := range desc.variableLabels {
|
||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
||||
if values[i] != curry[iCurry].value {
|
||||
return false
|
||||
}
|
||||
iCurry++
|
||||
continue
|
||||
}
|
||||
if values[i] != labels[k] {
|
||||
return false
|
||||
}
|
||||
|
@ -395,10 +454,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) extractLabelValues(labels Labels) []string {
|
||||
labelValues := make([]string, len(labels))
|
||||
for i, k := range m.desc.variableLabels {
|
||||
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
|
||||
labelValues := make([]string, len(labels)+len(curry))
|
||||
iCurry := 0
|
||||
for i, k := range desc.variableLabels {
|
||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
||||
labelValues[i] = curry[iCurry].value
|
||||
iCurry++
|
||||
continue
|
||||
}
|
||||
labelValues[i] = labels[k]
|
||||
}
|
||||
return labelValues
|
||||
}
|
||||
|
||||
func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
|
||||
labelValues := make([]string, len(lvs)+len(curry))
|
||||
var iCurry, iLVs int
|
||||
for i := range labelValues {
|
||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
||||
labelValues[i] = curry[iCurry].value
|
||||
iCurry++
|
||||
continue
|
||||
}
|
||||
labelValues[i] = lvs[iLVs]
|
||||
iLVs++
|
||||
}
|
||||
return labelValues
|
||||
}
|
||||
|
|
|
@ -0,0 +1,200 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// WrapRegistererWith returns a Registerer wrapping the provided
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed.
//
// Conflicts between Collectors registered through the original Registerer and
// Collectors registered through the wrapping Registerer will still be
// detected. Any AlreadyRegisteredError returned by the Register method of
// either Registerer will contain the ExistingCollector in the form it was
// provided to the respective registry.
//
// The Collector example demonstrates a use of WrapRegistererWith.
func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
	return &wrappingRegisterer{
		wrappedRegisterer: reg,
		labels:            labels,
	}
}
|
||||
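A sketch of wrapping a registerer so that one subsystem's collectors get a fixed label (the label and metric names are invented):

```go
package example

import "github.com/prometheus/client_golang/prometheus"

func registerWorkerMetrics() {
	// Everything registered through wrapped gets component="worker" as a const label.
	wrapped := prometheus.WrapRegistererWith(
		prometheus.Labels{"component": "worker"},
		prometheus.DefaultRegisterer,
	)
	jobs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total",
		Help: "Jobs processed by the worker.",
	})
	wrapped.MustRegister(jobs)
}
```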
|
||||
// WrapRegistererWithPrefix returns a Registerer wrapping the provided
|
||||
// Registerer. Collectors registered with the returned Registerer will be
|
||||
// registered with the wrapped Registerer in a modified way. The modified
|
||||
// Collector adds the provided prefix to the name of all Metrics it collects.
|
||||
//
|
||||
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
|
||||
// a sub-system. To make this work, register metrics of the sub-system with the
|
||||
// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
|
||||
// to use the same prefix for all metrics exposed. In particular, do not prefix
|
||||
// metric names that are standardized across applications, as that would break
|
||||
// horizontal monitoring, for example the metrics provided by the Go collector
|
||||
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
|
||||
// fact, those metrics are already prefixed with “go_” or “process_”,
|
||||
// respectively.)
|
||||
//
|
||||
// Conflicts between Collectors registered through the original Registerer and
|
||||
// Collectors registered through the wrapping Registerer will still be
|
||||
// detected. Any AlreadyRegisteredError returned by the Register method of
|
||||
// either Registerer will contain the ExistingCollector in the form it was
|
||||
// provided to the respective registry.
|
||||
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
|
||||
return &wrappingRegisterer{
|
||||
wrappedRegisterer: reg,
|
||||
prefix: prefix,
|
||||
}
|
||||
}
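And the prefixing variant, again only a sketch with invented names:

```go
package example

import "github.com/prometheus/client_golang/prometheus"

func registerCacheMetrics() {
	// Metrics registered through prefixed are exposed as mycache_<name>.
	prefixed := prometheus.WrapRegistererWithPrefix("mycache_", prometheus.DefaultRegisterer)
	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "hits_total",
		Help: "Cache hits.",
	})
	prefixed.MustRegister(hits)
}
```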
|
||||
|
||||
type wrappingRegisterer struct {
|
||||
wrappedRegisterer Registerer
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Register(c Collector) error {
|
||||
return r.wrappedRegisterer.Register(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
labels: r.labels,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
|
||||
for _, c := range cs {
|
||||
if err := r.Register(c); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Unregister(c Collector) bool {
|
||||
return r.wrappedRegisterer.Unregister(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
labels: r.labels,
|
||||
})
|
||||
}
|
||||
|
||||
type wrappingCollector struct {
|
||||
wrappedCollector Collector
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (c *wrappingCollector) Collect(ch chan<- Metric) {
|
||||
wrappedCh := make(chan Metric)
|
||||
go func() {
|
||||
c.wrappedCollector.Collect(wrappedCh)
|
||||
close(wrappedCh)
|
||||
}()
|
||||
for m := range wrappedCh {
|
||||
ch <- &wrappingMetric{
|
||||
wrappedMetric: m,
|
||||
prefix: c.prefix,
|
||||
labels: c.labels,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *wrappingCollector) Describe(ch chan<- *Desc) {
|
||||
wrappedCh := make(chan *Desc)
|
||||
go func() {
|
||||
c.wrappedCollector.Describe(wrappedCh)
|
||||
close(wrappedCh)
|
||||
}()
|
||||
for desc := range wrappedCh {
|
||||
ch <- wrapDesc(desc, c.prefix, c.labels)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *wrappingCollector) unwrapRecursively() Collector {
|
||||
switch wc := c.wrappedCollector.(type) {
|
||||
case *wrappingCollector:
|
||||
return wc.unwrapRecursively()
|
||||
default:
|
||||
return wc
|
||||
}
|
||||
}
|
||||
|
||||
type wrappingMetric struct {
|
||||
wrappedMetric Metric
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (m *wrappingMetric) Desc() *Desc {
|
||||
return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
|
||||
}
|
||||
|
||||
func (m *wrappingMetric) Write(out *dto.Metric) error {
|
||||
if err := m.wrappedMetric.Write(out); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(m.labels) == 0 {
|
||||
// No wrapping labels.
|
||||
return nil
|
||||
}
|
||||
for ln, lv := range m.labels {
|
||||
out.Label = append(out.Label, &dto.LabelPair{
|
||||
Name: proto.String(ln),
|
||||
Value: proto.String(lv),
|
||||
})
|
||||
}
|
||||
sort.Sort(labelPairSorter(out.Label))
|
||||
return nil
|
||||
}
|
||||
|
||||
func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
|
||||
constLabels := Labels{}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
constLabels[*lp.Name] = *lp.Value
|
||||
}
|
||||
for ln, lv := range labels {
|
||||
if _, alreadyUsed := constLabels[ln]; alreadyUsed {
|
||||
return &Desc{
|
||||
fqName: desc.fqName,
|
||||
help: desc.help,
|
||||
variableLabels: desc.variableLabels,
|
||||
constLabelPairs: desc.constLabelPairs,
|
||||
err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
|
||||
}
|
||||
}
|
||||
constLabels[ln] = lv
|
||||
}
|
||||
// NewDesc will do remaining validations.
|
||||
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
|
||||
// Propagate errors if there were any. This will override any error
|
||||
// created by NewDesc above, i.e. earlier errors get precedence.
|
||||
if desc.err != nil {
|
||||
newDesc.err = desc.err
|
||||
}
|
||||
return newDesc
|
||||
}
|
|
@ -1,26 +1,34 @@
|
|||
# Background
|
||||
Under most circumstances, manually downloading this repository should never
|
||||
be required.
|
||||
# Deprecation note
|
||||
|
||||
# Prerequisites
|
||||
# Base
|
||||
* [Google Protocol Buffers](https://developers.google.com/protocol-buffers)
|
||||
This repository used to contain the [protocol
|
||||
buffer](https://developers.google.com/protocol-buffers) code that defined both
|
||||
the data model and the exposition format of Prometheus metrics.
|
||||
|
||||
## Java
|
||||
* [Apache Maven](http://maven.apache.org)
|
||||
* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository
|
||||
Starting with v2.0.0, the [Prometheus
|
||||
server](https://github.com/prometheus/prometheus) does not ingest the
|
||||
protobuf-based exposition format anymore. Currently, all but one of the
|
||||
[official instrumentation
|
||||
libraries](https://prometheus.io/docs/instrumenting/clientlibs/) do not expose
|
||||
the protobuf-based exposition format. The [Go instrumentation
|
||||
library](https://github.com/prometheus/client_golang), however, has been built
|
||||
around the protobuf-based data model. As a byproduct thereof, it is still able
|
||||
to expose the protobuf-based exposition format. The Go instrumentation library
|
||||
is the only remaining repository within the [Prometheus GitHub
|
||||
org](https://github.com/prometheus) directly using the prometheus/client_model
|
||||
repository.
|
||||
|
||||
## Go
|
||||
* [Go](http://golang.org)
|
||||
* [goprotobuf](https://code.google.com/p/goprotobuf)
|
||||
Therefore, formerly existing support for languages other than Go (namely C++,
|
||||
Java, Python, Ruby) has been removed from this repository. If you are a 3rd
|
||||
party user of those languages, you can go back to [commit
|
||||
14fe0d1](https://github.com/prometheus/client_model/commit/14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016)
|
||||
to keep using the old code, or you can consume
|
||||
[`metrics.proto`](https://github.com/prometheus/client_model/blob/master/metrics.proto)
|
||||
directly with your own protobuf tooling. Note, however, that changes of
|
||||
`metrics.proto` after [commit
|
||||
14fe0d1](https://github.com/prometheus/client_model/commit/14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016)
|
||||
are solely informed by requirements of the Go instrumentation library and will
|
||||
not take into account any requirements of other languages or stability concerns
|
||||
for the protobuf-based exposition format.
|
||||
|
||||
## Ruby
|
||||
* [Ruby](https://www.ruby-lang.org)
|
||||
* [bundler](https://rubygems.org/gems/bundler)
|
||||
|
||||
# Building
|
||||
$ make
|
||||
|
||||
# Getting Started
|
||||
* The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go).
|
||||
* All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
Check out the [OpenMetrics project](https://openmetrics.io/) for the future of
|
||||
the data model and exposition format used by Prometheus and others.
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
module github.com/prometheus/client_model

go 1.9

require (
	github.com/golang/protobuf v1.2.0
	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect
)
|
|
@ -1,34 +1,26 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: metrics.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package io_prometheus_client is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
metrics.proto
|
||||
|
||||
It has these top-level messages:
|
||||
LabelPair
|
||||
Gauge
|
||||
Counter
|
||||
Quantile
|
||||
Summary
|
||||
Untyped
|
||||
Histogram
|
||||
Bucket
|
||||
Metric
|
||||
MetricFamily
|
||||
*/
|
||||
package io_prometheus_client
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type MetricType int32
|
||||
|
||||
const (
|
||||
|
@ -46,6 +38,7 @@ var MetricType_name = map[int32]string{
|
|||
3: "UNTYPED",
|
||||
4: "HISTOGRAM",
|
||||
}
|
||||
|
||||
var MetricType_value = map[string]int32{
|
||||
"COUNTER": 0,
|
||||
"GAUGE": 1,
|
||||
|
@ -59,9 +52,11 @@ func (x MetricType) Enum() *MetricType {
|
|||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x MetricType) String() string {
|
||||
return proto.EnumName(MetricType_name, int32(x))
|
||||
}
|
||||
|
||||
func (x *MetricType) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
|
||||
if err != nil {
|
||||
|
@ -71,15 +66,42 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (MetricType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{0}
|
||||
}
|
||||
|
||||
type LabelPair struct {
|
||||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LabelPair) Reset() { *m = LabelPair{} }
|
||||
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
|
||||
func (*LabelPair) ProtoMessage() {}
|
||||
func (*LabelPair) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{0}
|
||||
}
|
||||
|
||||
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LabelPair.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *LabelPair) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LabelPair.Merge(m, src)
|
||||
}
|
||||
func (m *LabelPair) XXX_Size() int {
|
||||
return xxx_messageInfo_LabelPair.Size(m)
|
||||
}
|
||||
func (m *LabelPair) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LabelPair.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LabelPair proto.InternalMessageInfo
|
||||
|
||||
func (m *LabelPair) GetName() string {
|
||||
if m != nil && m.Name != nil {
|
||||
|
@ -97,12 +119,35 @@ func (m *LabelPair) GetValue() string {
|
|||
|
||||
type Gauge struct {
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Gauge) Reset() { *m = Gauge{} }
|
||||
func (m *Gauge) String() string { return proto.CompactTextString(m) }
|
||||
func (*Gauge) ProtoMessage() {}
|
||||
func (*Gauge) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{1}
|
||||
}
|
||||
|
||||
func (m *Gauge) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Gauge.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Gauge) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Gauge.Merge(m, src)
|
||||
}
|
||||
func (m *Gauge) XXX_Size() int {
|
||||
return xxx_messageInfo_Gauge.Size(m)
|
||||
}
|
||||
func (m *Gauge) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Gauge.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Gauge proto.InternalMessageInfo
|
||||
|
||||
func (m *Gauge) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
|
@ -113,12 +158,36 @@ func (m *Gauge) GetValue() float64 {
|
|||
|
||||
type Counter struct {
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Counter) Reset() { *m = Counter{} }
|
||||
func (m *Counter) String() string { return proto.CompactTextString(m) }
|
||||
func (*Counter) ProtoMessage() {}
|
||||
func (*Counter) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{2}
|
||||
}
|
||||
|
||||
func (m *Counter) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Counter.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Counter) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Counter.Merge(m, src)
|
||||
}
|
||||
func (m *Counter) XXX_Size() int {
|
||||
return xxx_messageInfo_Counter.Size(m)
|
||||
}
|
||||
func (m *Counter) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Counter.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Counter proto.InternalMessageInfo
|
||||
|
||||
func (m *Counter) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
|
@ -127,15 +196,45 @@ func (m *Counter) GetValue() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Counter) GetExemplar() *Exemplar {
|
||||
if m != nil {
|
||||
return m.Exemplar
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Quantile struct {
|
||||
Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
|
||||
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Quantile) Reset() { *m = Quantile{} }
|
||||
func (m *Quantile) String() string { return proto.CompactTextString(m) }
|
||||
func (*Quantile) ProtoMessage() {}
|
||||
func (*Quantile) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{3}
|
||||
}
|
||||
|
||||
func (m *Quantile) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Quantile.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Quantile) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Quantile.Merge(m, src)
|
||||
}
|
||||
func (m *Quantile) XXX_Size() int {
|
||||
return xxx_messageInfo_Quantile.Size(m)
|
||||
}
|
||||
func (m *Quantile) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Quantile.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Quantile proto.InternalMessageInfo
|
||||
|
||||
func (m *Quantile) GetQuantile() float64 {
|
||||
if m != nil && m.Quantile != nil {
|
||||
|
@ -152,15 +251,38 @@ func (m *Quantile) GetValue() float64 {
|
|||
}
|
||||
|
||||
type Summary struct {
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
|
||||
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Summary) Reset() { *m = Summary{} }
|
||||
func (m *Summary) String() string { return proto.CompactTextString(m) }
|
||||
func (*Summary) ProtoMessage() {}
|
||||
func (*Summary) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{4}
|
||||
}
|
||||
|
||||
func (m *Summary) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Summary.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Summary) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Summary.Merge(m, src)
|
||||
}
|
||||
func (m *Summary) XXX_Size() int {
|
||||
return xxx_messageInfo_Summary.Size(m)
|
||||
}
|
||||
func (m *Summary) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Summary.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Summary proto.InternalMessageInfo
|
||||
|
||||
func (m *Summary) GetSampleCount() uint64 {
|
||||
if m != nil && m.SampleCount != nil {
|
||||
|
@ -185,12 +307,35 @@ func (m *Summary) GetQuantile() []*Quantile {
|
|||
|
||||
type Untyped struct {
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Untyped) Reset() { *m = Untyped{} }
|
||||
func (m *Untyped) String() string { return proto.CompactTextString(m) }
|
||||
func (*Untyped) ProtoMessage() {}
|
||||
func (*Untyped) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{5}
|
||||
}
|
||||
|
||||
func (m *Untyped) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Untyped.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Untyped) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Untyped.Merge(m, src)
|
||||
}
|
||||
func (m *Untyped) XXX_Size() int {
|
||||
return xxx_messageInfo_Untyped.Size(m)
|
||||
}
|
||||
func (m *Untyped) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Untyped.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Untyped proto.InternalMessageInfo
|
||||
|
||||
func (m *Untyped) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
|
@ -200,15 +345,38 @@ func (m *Untyped) GetValue() float64 {
|
|||
}
|
||||
|
||||
type Histogram struct {
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
|
||||
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Histogram) Reset() { *m = Histogram{} }
|
||||
func (m *Histogram) String() string { return proto.CompactTextString(m) }
|
||||
func (*Histogram) ProtoMessage() {}
|
||||
func (*Histogram) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{6}
|
||||
}
|
||||
|
||||
func (m *Histogram) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Histogram.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Histogram) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Histogram.Merge(m, src)
|
||||
}
|
||||
func (m *Histogram) XXX_Size() int {
|
||||
return xxx_messageInfo_Histogram.Size(m)
|
||||
}
|
||||
func (m *Histogram) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Histogram.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Histogram proto.InternalMessageInfo
|
||||
|
||||
func (m *Histogram) GetSampleCount() uint64 {
|
||||
if m != nil && m.SampleCount != nil {
|
||||
|
@ -232,14 +400,38 @@ func (m *Histogram) GetBucket() []*Bucket {
|
|||
}
|
||||
|
||||
type Bucket struct {
|
||||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
|
||||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
|
||||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
|
||||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
|
||||
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Bucket) Reset() { *m = Bucket{} }
|
||||
func (m *Bucket) String() string { return proto.CompactTextString(m) }
|
||||
func (*Bucket) ProtoMessage() {}
|
||||
func (*Bucket) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{7}
|
||||
}
|
||||
|
||||
func (m *Bucket) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Bucket.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Bucket) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Bucket.Merge(m, src)
|
||||
}
|
||||
func (m *Bucket) XXX_Size() int {
|
||||
return xxx_messageInfo_Bucket.Size(m)
|
||||
}
|
||||
func (m *Bucket) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Bucket.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Bucket proto.InternalMessageInfo
|
||||
|
||||
func (m *Bucket) GetCumulativeCount() uint64 {
|
||||
if m != nil && m.CumulativeCount != nil {
|
||||
|
@ -255,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Bucket) GetExemplar() *Exemplar {
|
||||
if m != nil {
|
||||
return m.Exemplar
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Exemplar struct {
|
||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
|
||||
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
|
||||
Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Exemplar) Reset() { *m = Exemplar{} }
|
||||
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
|
||||
func (*Exemplar) ProtoMessage() {}
|
||||
func (*Exemplar) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{8}
|
||||
}
|
||||
|
||||
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Exemplar.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Exemplar) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Exemplar.Merge(m, src)
|
||||
}
|
||||
func (m *Exemplar) XXX_Size() int {
|
||||
return xxx_messageInfo_Exemplar.Size(m)
|
||||
}
|
||||
func (m *Exemplar) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Exemplar.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
|
||||
|
||||
func (m *Exemplar) GetLabel() []*LabelPair {
|
||||
if m != nil {
|
||||
return m.Label
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Exemplar) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
|
||||
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
|
||||
|
@ -262,13 +516,36 @@ type Metric struct {
|
|||
Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
|
||||
Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
|
||||
Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
|
||||
TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
|
||||
TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Metric) Reset() { *m = Metric{} }
|
||||
func (m *Metric) String() string { return proto.CompactTextString(m) }
|
||||
func (*Metric) ProtoMessage() {}
|
||||
func (*Metric) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{9}
|
||||
}
|
||||
|
||||
func (m *Metric) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Metric.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Metric) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Metric.Merge(m, src)
|
||||
}
|
||||
func (m *Metric) XXX_Size() int {
|
||||
return xxx_messageInfo_Metric.Size(m)
|
||||
}
|
||||
func (m *Metric) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Metric.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Metric proto.InternalMessageInfo
|
||||
|
||||
func (m *Metric) GetLabel() []*LabelPair {
|
||||
if m != nil {
|
||||
|
@ -324,12 +601,35 @@ type MetricFamily struct {
|
|||
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
|
||||
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
|
||||
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
|
||||
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*MetricFamily) ProtoMessage() {}
|
||||
func (*MetricFamily) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{10}
|
||||
}
|
||||
|
||||
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MetricFamily) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MetricFamily.Merge(m, src)
|
||||
}
|
||||
func (m *MetricFamily) XXX_Size() int {
|
||||
return xxx_messageInfo_MetricFamily.Size(m)
|
||||
}
|
||||
func (m *MetricFamily) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MetricFamily.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
|
||||
|
||||
func (m *MetricFamily) GetName() string {
|
||||
if m != nil && m.Name != nil {
|
||||
|
@ -361,4 +661,63 @@ func (m *MetricFamily) GetMetric() []*Metric {
|
|||
|
||||
func init() {
|
||||
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
|
||||
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
|
||||
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
|
||||
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
|
||||
proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
|
||||
proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
|
||||
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
|
||||
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
|
||||
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
|
||||
proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
|
||||
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
|
||||
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
|
||||
|
||||
var fileDescriptor_6039342a2ba47b72 = []byte{
|
||||
// 665 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
|
||||
0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
|
||||
0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
|
||||
0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
|
||||
0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
|
||||
0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
|
||||
0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
|
||||
0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
|
||||
0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
|
||||
0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
|
||||
0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
|
||||
0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
|
||||
0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
|
||||
0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
|
||||
0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
|
||||
0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
|
||||
0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
|
||||
0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
|
||||
0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
|
||||
0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
|
||||
0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
|
||||
0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
|
||||
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
|
||||
0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
|
||||
0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
|
||||
0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
|
||||
0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
|
||||
0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
|
||||
0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
|
||||
0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
|
||||
0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
|
||||
0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
|
||||
0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
|
||||
0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
|
||||
0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
|
||||
0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
|
||||
0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
|
||||
0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
|
||||
0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
|
||||
0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
|
||||
0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
|
||||
0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
|
||||
}
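// Illustrative sketch (not part of the vendored file): how the generated types
// above are typically populated by hand, e.g. in tests. Assumes the package is
// imported as dto ("github.com/prometheus/client_model/go") and proto is
// "github.com/golang/protobuf/proto"; the metric name and values are made up.
func exampleMetricFamily() *dto.MetricFamily {
	return &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label:   []*dto.LabelPair{{Name: proto.String("code"), Value: proto.String("200")}},
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
}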
@ -6,7 +6,11 @@ components and libraries.

* **config**: Common configuration structures
* **expfmt**: Decoding and encoding for the exposition format
* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus)
* **model**: Shared data structures
* **promlog**: A logging wrapper around [go-kit/log](https://github.com/go-kit/kit/tree/master/log)
* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`
* **server**: Common servers
* **version**: Version information and metrics

## Deprecated
* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus)
|
||||
|
|
|
@ -30,17 +30,38 @@ type Encoder interface {
|
|||
Encode(*dto.MetricFamily) error
|
||||
}
|
||||
|
||||
type encoder func(*dto.MetricFamily) error
|
||||
|
||||
func (e encoder) Encode(v *dto.MetricFamily) error {
|
||||
return e(v)
|
||||
// Closer is implemented by Encoders that need to be closed to finalize
|
||||
// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
|
||||
//
|
||||
// Note that all Encoder implementations returned from this package implement
|
||||
// Closer, too, even if the Close call is a no-op. This happens in preparation
|
||||
// for adding a Close method to the Encoder interface directly in a (mildly
|
||||
// breaking) release in the future.
|
||||
type Closer interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Negotiate returns the Content-Type based on the given Accept header.
|
||||
// If no appropriate accepted type is found, FmtText is returned.
|
||||
type encoderCloser struct {
|
||||
encode func(*dto.MetricFamily) error
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
|
||||
return ec.encode(v)
|
||||
}
|
||||
|
||||
func (ec encoderCloser) Close() error {
|
||||
return ec.close()
|
||||
}
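// Illustrative sketch (not part of the vendored file): since Encoder itself
// has no Close method yet, callers finalize encoding through an interface
// upgrade to Closer. The helper name is made up.
func closeEncoder(enc Encoder) error {
	if c, ok := enc.(Closer); ok {
		// For FmtOpenMetrics this writes the trailing "# EOF" line; for the
		// other formats it is a no-op.
		return c.Close()
	}
	return nil
}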
|
||||
|
||||
// Negotiate returns the Content-Type based on the given Accept header. If no
|
||||
// appropriate accepted type is found, FmtText is returned (which is the
|
||||
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
|
||||
// as the support is still experimental. To include the option to negotiate
|
||||
// FmtOpenMetrics, use NegotiateOpenMetrics.
|
||||
func Negotiate(h http.Header) Format {
|
||||
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
|
||||
// Check for protocol buffer
|
||||
ver := ac.Params["version"]
|
||||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||
switch ac.Params["encoding"] {
|
||||
case "delimited":
|
||||
|
@ -51,8 +72,6 @@ func Negotiate(h http.Header) Format {
|
|||
return FmtProtoCompact
|
||||
}
|
||||
}
|
||||
// Check for text format.
|
||||
ver := ac.Params["version"]
|
||||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||
return FmtText
|
||||
}
|
||||
|
@ -60,29 +79,84 @@ func Negotiate(h http.Header) Format {
|
|||
return FmtText
|
||||
}
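// Illustrative sketch (not part of the vendored file): typical use of
// Negotiate with an incoming request's headers. Assumes "net/http" is
// imported; Format is a string type, so the result can be used directly as a
// Content-Type header value.
func negotiatedContentType(r *http.Request) string {
	return string(Negotiate(r.Header))
}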
|
||||
|
||||
// NewEncoder returns a new encoder based on content type negotiation.
|
||||
// NegotiateIncludingOpenMetrics works like Negotiate but includes
|
||||
// FmtOpenMetrics as an option for the result. Note that this function is
|
||||
// temporary and will disappear once FmtOpenMetrics is fully supported and as
|
||||
// such may be negotiated by the normal Negotiate function.
|
||||
func NegotiateIncludingOpenMetrics(h http.Header) Format {
|
||||
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
|
||||
ver := ac.Params["version"]
|
||||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||
switch ac.Params["encoding"] {
|
||||
case "delimited":
|
||||
return FmtProtoDelim
|
||||
case "text":
|
||||
return FmtProtoText
|
||||
case "compact-text":
|
||||
return FmtProtoCompact
|
||||
}
|
||||
}
|
||||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||
return FmtText
|
||||
}
|
||||
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
|
||||
return FmtOpenMetrics
|
||||
}
|
||||
}
|
||||
return FmtText
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder based on content type negotiation. All
|
||||
// Encoder implementations returned by NewEncoder also implement Closer, and
|
||||
// callers should always call the Close method. It is currently only required
|
||||
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
|
||||
// to the Encoder interface directly. The current version of the Encoder
|
||||
// interface is kept for backwards compatibility.
|
||||
func NewEncoder(w io.Writer, format Format) Encoder {
|
||||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.WriteDelimited(w, v)
|
||||
return err
|
||||
})
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
case FmtProtoCompact:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, v.String())
|
||||
return err
|
||||
})
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
case FmtProtoText:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
|
||||
return err
|
||||
})
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
case FmtText:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := MetricFamilyToText(w, v)
|
||||
return err
|
||||
})
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
panic("expfmt.NewEncoder: unknown format")
|
||||
case FmtOpenMetrics:
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := MetricFamilyToOpenMetrics(w, v)
|
||||
return err
|
||||
},
|
||||
close: func() error {
|
||||
_, err := FinalizeOpenMetrics(w)
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
|
||||
}
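// Illustrative sketch (not part of the vendored file): the intended call
// pattern for NewEncoder, including the Close call that a future release is
// expected to require. mfs is an assumed, pre-collected slice.
func encodeAll(w io.Writer, format Format, mfs []*dto.MetricFamily) error {
	enc := NewEncoder(w, format)
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			return err
		}
	}
	// Every Encoder returned by NewEncoder also implements Closer.
	if c, ok := enc.(Closer); ok {
		return c.Close()
	}
	return nil
}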
@ -23,6 +23,8 @@ const (
	ProtoType          = `application/vnd.google.protobuf`
	ProtoProtocol      = `io.prometheus.client.MetricFamily`
	ProtoFmt           = ProtoType + "; proto=" + ProtoProtocol + ";"
	OpenMetricsType    = `application/openmetrics-text`
	OpenMetricsVersion = "0.0.1"

	// The Content-Type values for the different wire protocols.
	FmtUnknown      Format = `<unknown>`
@ -30,6 +32,7 @@ const (
	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
	FmtProtoText    Format = ProtoFmt + ` encoding=text`
	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
	FmtOpenMetrics  Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
)

const (
@ -0,0 +1,527 @@
|
|||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
|
||||
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
|
||||
// the number of bytes written and any error encountered. The output will have
|
||||
// the same order as the input, no further sorting is performed. Furthermore,
|
||||
// this function assumes the input is already sanitized and does not perform any
|
||||
// sanity checks. If the input contains duplicate metrics or invalid metric or
|
||||
// label names, the conversion will result in invalid text format output.
|
||||
//
|
||||
// This function fulfills the type 'expfmt.encoder'.
|
||||
//
|
||||
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
|
||||
// on individual metric families, it is the responsibility of the caller to
|
||||
// append this line to 'out' once all metric families have been written.
|
||||
// Conveniently, this can be done by calling FinalizeOpenMetrics.
|
||||
//
|
||||
// The output should be fully OpenMetrics compliant. However, there are a few
|
||||
// missing features and peculiarities to avoid complications when switching from
|
||||
// Prometheus to OpenMetrics or vice versa:
|
||||
//
|
||||
// - Counters are expected to have the `_total` suffix in their metric name. In
|
||||
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
|
||||
// line. A counter with a missing `_total` suffix is not an error. However,
|
||||
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
|
||||
// output.
|
||||
//
|
||||
// - No support for the following (optional) features: `# UNIT` line, `_created`
|
||||
// line, info type, stateset type, gaugehistogram type.
|
||||
//
|
||||
// - The size of exemplar labels is not checked (i.e. it's possible to create
|
||||
// exemplars that are larger than allowed by the OpenMetrics specification).
|
||||
//
|
||||
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
|
||||
// with a `NaN` value.)
|
||||
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||
name := in.GetName()
|
||||
if name == "" {
|
||||
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||
}
|
||||
|
||||
// Try the interface upgrade. If it doesn't work, we'll use a
|
||||
// bufio.Writer from the sync.Pool.
|
||||
w, ok := out.(enhancedWriter)
|
||||
if !ok {
|
||||
b := bufPool.Get().(*bufio.Writer)
|
||||
b.Reset(out)
|
||||
w = b
|
||||
defer func() {
|
||||
bErr := b.Flush()
|
||||
if err == nil {
|
||||
err = bErr
|
||||
}
|
||||
bufPool.Put(b)
|
||||
}()
|
||||
}
|
||||
|
||||
var (
|
||||
n int
|
||||
metricType = in.GetType()
|
||||
shortName = name
|
||||
)
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
|
||||
shortName = name[:len(name)-6]
|
||||
}
|
||||
|
||||
// Comments, first HELP, then TYPE.
|
||||
if in.Help != nil {
|
||||
n, err = w.WriteString("# HELP ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(shortName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeEscapedString(w, *in.Help, true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = w.WriteString("# TYPE ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(shortName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
if strings.HasSuffix(name, "_total") {
|
||||
n, err = w.WriteString(" counter\n")
|
||||
} else {
|
||||
n, err = w.WriteString(" unknown\n")
|
||||
}
|
||||
case dto.MetricType_GAUGE:
|
||||
n, err = w.WriteString(" gauge\n")
|
||||
case dto.MetricType_SUMMARY:
|
||||
n, err = w.WriteString(" summary\n")
|
||||
case dto.MetricType_UNTYPED:
|
||||
n, err = w.WriteString(" unknown\n")
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
n, err = w.WriteString(" histogram\n")
|
||||
default:
|
||||
return written, fmt.Errorf("unknown metric type %s", metricType.String())
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Finally the samples, one line for each.
|
||||
for _, metric := range in.Metric {
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
if metric.Counter == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected counter in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
// Note that we have ensured above that either the name
|
||||
// ends on `_total` or that the rendered type is
|
||||
// `unknown`. Therefore, no `_total` must be added here.
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Counter.GetValue(), 0, false,
|
||||
metric.Counter.Exemplar,
|
||||
)
|
||||
case dto.MetricType_GAUGE:
|
||||
if metric.Gauge == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected gauge in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Gauge.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_UNTYPED:
|
||||
if metric.Untyped == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected untyped in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Untyped.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_SUMMARY:
|
||||
if metric.Summary == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected summary in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
for _, q := range metric.Summary.Quantile {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric,
|
||||
model.QuantileLabel, q.GetQuantile(),
|
||||
q.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
metric.Summary.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
0, metric.Summary.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if metric.Histogram == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected histogram in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
infSeen := false
|
||||
for _, b := range metric.Histogram.Bucket {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
model.BucketLabel, b.GetUpperBound(),
|
||||
0, b.GetCumulativeCount(), true,
|
||||
b.Exemplar,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if math.IsInf(b.GetUpperBound(), +1) {
|
||||
infSeen = true
|
||||
}
|
||||
}
|
||||
if !infSeen {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
model.BucketLabel, math.Inf(+1),
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
metric.Histogram.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
default:
|
||||
return written, fmt.Errorf(
|
||||
"unexpected type in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
|
||||
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
|
||||
return w.Write([]byte("# EOF\n"))
|
||||
}
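// Illustrative sketch (not part of the vendored file): converting a set of
// metric families to OpenMetrics text, including the mandatory trailing
// "# EOF" line. mfs is an assumed, pre-collected slice.
func writeOpenMetricsDump(out io.Writer, mfs []*dto.MetricFamily) error {
	for _, mf := range mfs {
		if _, err := MetricFamilyToOpenMetrics(out, mf); err != nil {
			return err
		}
	}
	_, err := FinalizeOpenMetrics(out)
	return err
}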
|
||||
|
||||
// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
|
||||
// w, given the metric name, the metric proto message itself, optionally an
|
||||
// additional label name with a float64 value (use empty string as label name if
|
||||
// not required), the value (optionally as float64 or uint64, determined by
|
||||
// useIntValue), and optionally an exemplar (use nil if not required). The
|
||||
// function returns the number of bytes written and any error encountered.
|
||||
func writeOpenMetricsSample(
|
||||
w enhancedWriter,
|
||||
name, suffix string,
|
||||
metric *dto.Metric,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
floatValue float64, intValue uint64, useIntValue bool,
|
||||
exemplar *dto.Exemplar,
|
||||
) (int, error) {
|
||||
var written int
|
||||
n, err := w.WriteString(name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if suffix != "" {
|
||||
n, err = w.WriteString(suffix)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsLabelPairs(
|
||||
w, metric.Label, additionalLabelName, additionalLabelValue,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if useIntValue {
|
||||
n, err = writeUint(w, intValue)
|
||||
} else {
|
||||
n, err = writeOpenMetricsFloat(w, floatValue)
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if metric.TimestampMs != nil {
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
// TODO(beorn7): Format this directly without converting to a float first.
|
||||
n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
if exemplar != nil {
|
||||
n, err = writeExemplar(w, exemplar)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
|
||||
// in OpenMetrics style.
|
||||
func writeOpenMetricsLabelPairs(
|
||||
w enhancedWriter,
|
||||
in []*dto.LabelPair,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
) (int, error) {
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
var (
|
||||
written int
|
||||
separator byte = '{'
|
||||
)
|
||||
for _, lp := range in {
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(lp.GetName())
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = w.WriteString(`="`)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeEscapedString(w, lp.GetValue(), true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
separator = ','
|
||||
}
|
||||
if additionalLabelName != "" {
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(additionalLabelName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = w.WriteString(`="`)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeOpenMetricsFloat(w, additionalLabelValue)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
err := w.WriteByte('}')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
|
||||
// function returns the number of bytes written and any error encountered.
|
||||
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
||||
written := 0
|
||||
n, err := w.WriteString(" # ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeOpenMetricsFloat(w, e.GetValue())
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if e.Timestamp != nil {
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
ts, err := ptypes.Timestamp((*e).Timestamp)
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
// TODO(beorn7): Format this directly from components of ts to
|
||||
// avoid overflow/underflow and precision issues of the float
|
||||
// conversion.
|
||||
n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
|
||||
// number would otherwise contain neither a "." nor an "e".
|
||||
func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
|
||||
switch {
|
||||
case f == 1:
|
||||
return w.WriteString("1.0")
|
||||
case f == 0:
|
||||
return w.WriteString("0.0")
|
||||
case f == -1:
|
||||
return w.WriteString("-1.0")
|
||||
case math.IsNaN(f):
|
||||
return w.WriteString("NaN")
|
||||
case math.IsInf(f, +1):
|
||||
return w.WriteString("+Inf")
|
||||
case math.IsInf(f, -1):
|
||||
return w.WriteString("-Inf")
|
||||
default:
|
||||
bp := numBufPool.Get().(*[]byte)
|
||||
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
|
||||
if !bytes.ContainsAny(*bp, "e.") {
|
||||
*bp = append(*bp, '.', '0')
|
||||
}
|
||||
written, err := w.Write(*bp)
|
||||
numBufPool.Put(bp)
|
||||
return written, err
|
||||
}
|
||||
}
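// Illustrative sketch (not part of the vendored file): the same formatting
// rule as writeOpenMetricsFloat, written as a standalone helper to show the
// expected output, e.g. 1 -> "1.0", 0.5 -> "0.5", 1e+23 keeps its exponent.
func openMetricsFloatString(f float64) string {
	switch {
	case math.IsNaN(f):
		return "NaN"
	case math.IsInf(f, +1):
		return "+Inf"
	case math.IsInf(f, -1):
		return "-Inf"
	}
	s := strconv.FormatFloat(f, 'g', -1, 64)
	if !strings.ContainsAny(s, "e.") {
		s += ".0"
	}
	return s
}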
|
||||
|
||||
// writeUint is like writeInt just for uint64.
|
||||
func writeUint(w enhancedWriter, u uint64) (int, error) {
|
||||
bp := numBufPool.Get().(*[]byte)
|
||||
*bp = strconv.AppendUint((*bp)[:0], u, 10)
|
||||
written, err := w.Write(*bp)
|
||||
numBufPool.Put(bp)
|
||||
return written, err
|
||||
}
@ -14,13 +14,45 @@
|
|||
package expfmt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
|
||||
// implements it.
|
||||
type enhancedWriter interface {
|
||||
io.Writer
|
||||
WriteRune(r rune) (n int, err error)
|
||||
WriteString(s string) (n int, err error)
|
||||
WriteByte(c byte) error
|
||||
}
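// Illustrative sketch (not part of the vendored file): a compile-time check
// that *bufio.Writer satisfies enhancedWriter, which is why the bufPool below
// can hand out bufio.Writers directly when the caller's io.Writer lacks the
// extra methods.
var _ enhancedWriter = (*bufio.Writer)(nil)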
|
||||
|
||||
const (
|
||||
initialNumBufSize = 24
|
||||
)
|
||||
|
||||
var (
|
||||
bufPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return bufio.NewWriter(ioutil.Discard)
|
||||
},
|
||||
}
|
||||
numBufPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
b := make([]byte, 0, initialNumBufSize)
|
||||
return &b
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// MetricFamilyToText converts a MetricFamily proto message into text format and
|
||||
|
@ -32,37 +64,90 @@ import (
|
|||
// will result in invalid text format output.
|
||||
//
|
||||
// This method fulfills the type 'prometheus.encoder'.
|
||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||
var written int
|
||||
|
||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||
// Fail-fast checks.
|
||||
if len(in.Metric) == 0 {
|
||||
return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
|
||||
return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
|
||||
}
|
||||
name := in.GetName()
|
||||
if name == "" {
|
||||
return written, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||
}
|
||||
|
||||
// Try the interface upgrade. If it doesn't work, we'll use a
|
||||
// bufio.Writer from the sync.Pool.
|
||||
w, ok := out.(enhancedWriter)
|
||||
if !ok {
|
||||
b := bufPool.Get().(*bufio.Writer)
|
||||
b.Reset(out)
|
||||
w = b
|
||||
defer func() {
|
||||
bErr := b.Flush()
|
||||
if err == nil {
|
||||
err = bErr
|
||||
}
|
||||
bufPool.Put(b)
|
||||
}()
|
||||
}
|
||||
|
||||
var n int
|
||||
|
||||
// Comments, first HELP, then TYPE.
|
||||
if in.Help != nil {
|
||||
n, err := fmt.Fprintf(
|
||||
out, "# HELP %s %s\n",
|
||||
name, escapeString(*in.Help, false),
|
||||
)
|
||||
n, err = w.WriteString("# HELP ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeEscapedString(w, *in.Help, false)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = w.WriteString("# TYPE ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
metricType := in.GetType()
|
||||
n, err := fmt.Fprintf(
|
||||
out, "# TYPE %s %s\n",
|
||||
name, strings.ToLower(metricType.String()),
|
||||
)
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
n, err = w.WriteString(" counter\n")
|
||||
case dto.MetricType_GAUGE:
|
||||
n, err = w.WriteString(" gauge\n")
|
||||
case dto.MetricType_SUMMARY:
|
||||
n, err = w.WriteString(" summary\n")
|
||||
case dto.MetricType_UNTYPED:
|
||||
n, err = w.WriteString(" untyped\n")
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
n, err = w.WriteString(" histogram\n")
|
||||
default:
|
||||
return written, fmt.Errorf("unknown metric type %s", metricType.String())
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
return
|
||||
}
|
||||
|
||||
// Finally the samples, one line for each.
|
||||
|
@ -75,9 +160,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
|||
)
|
||||
}
|
||||
n, err = writeSample(
|
||||
name, metric, "", "",
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Counter.GetValue(),
|
||||
out,
|
||||
)
|
||||
case dto.MetricType_GAUGE:
|
||||
if metric.Gauge == nil {
|
||||
|
@ -86,9 +170,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
|||
)
|
||||
}
|
||||
n, err = writeSample(
|
||||
name, metric, "", "",
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Gauge.GetValue(),
|
||||
out,
|
||||
)
|
||||
case dto.MetricType_UNTYPED:
|
||||
if metric.Untyped == nil {
|
||||
|
@ -97,9 +180,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
|||
)
|
||||
}
|
||||
n, err = writeSample(
|
||||
name, metric, "", "",
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Untyped.GetValue(),
|
||||
out,
|
||||
)
|
||||
case dto.MetricType_SUMMARY:
|
||||
if metric.Summary == nil {
|
||||
|
@ -109,29 +191,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
|||
}
|
||||
for _, q := range metric.Summary.Quantile {
|
||||
n, err = writeSample(
|
||||
name, metric,
|
||||
model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
|
||||
w, name, "", metric,
|
||||
model.QuantileLabel, q.GetQuantile(),
|
||||
q.GetValue(),
|
||||
out,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = writeSample(
|
||||
name+"_sum", metric, "", "",
|
||||
w, name, "_sum", metric, "", 0,
|
||||
metric.Summary.GetSampleSum(),
|
||||
out,
|
||||
)
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeSample(
|
||||
name+"_count", metric, "", "",
|
||||
w, name, "_count", metric, "", 0,
|
||||
float64(metric.Summary.GetSampleCount()),
|
||||
out,
|
||||
)
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if metric.Histogram == nil {
|
||||
|
@ -140,46 +219,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
|||
)
|
||||
}
|
||||
infSeen := false
|
||||
for _, q := range metric.Histogram.Bucket {
|
||||
for _, b := range metric.Histogram.Bucket {
|
||||
n, err = writeSample(
|
||||
name+"_bucket", metric,
|
||||
model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
|
||||
float64(q.GetCumulativeCount()),
|
||||
out,
|
||||
w, name, "_bucket", metric,
|
||||
model.BucketLabel, b.GetUpperBound(),
|
||||
float64(b.GetCumulativeCount()),
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
return
|
||||
}
|
||||
if math.IsInf(q.GetUpperBound(), +1) {
|
||||
if math.IsInf(b.GetUpperBound(), +1) {
|
||||
infSeen = true
|
||||
}
|
||||
}
|
||||
if !infSeen {
|
||||
n, err = writeSample(
|
||||
name+"_bucket", metric,
|
||||
model.BucketLabel, "+Inf",
|
||||
w, name, "_bucket", metric,
|
||||
model.BucketLabel, math.Inf(+1),
|
||||
float64(metric.Histogram.GetSampleCount()),
|
||||
out,
|
||||
)
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = writeSample(
|
||||
name+"_sum", metric, "", "",
|
||||
w, name, "_sum", metric, "", 0,
|
||||
metric.Histogram.GetSampleSum(),
|
||||
out,
|
||||
)
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeSample(
|
||||
name+"_count", metric, "", "",
|
||||
w, name, "_count", metric, "", 0,
|
||||
float64(metric.Histogram.GetSampleCount()),
|
||||
out,
|
||||
)
|
||||
default:
|
||||
return written, fmt.Errorf(
|
||||
|
@ -188,116 +263,203 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
|||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
return
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
return
|
||||
}
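// Illustrative sketch (not part of the vendored file): rendering one metric
// family in the Prometheus text format. Assumes "os" is imported and mf is a
// populated *dto.MetricFamily.
func printTextFormat(mf *dto.MetricFamily) error {
	_, err := MetricFamilyToText(os.Stdout, mf)
	return err
}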
|
||||
|
||||
// writeSample writes a single sample in text format to out, given the metric
|
||||
// writeSample writes a single sample in text format to w, given the metric
|
||||
// name, the metric proto message itself, optionally an additional label name
|
||||
// and value (use empty strings if not required), and the value. The function
|
||||
// returns the number of bytes written and any error encountered.
|
||||
// with a float64 value (use empty string as label name if not required), and
|
||||
// the value. The function returns the number of bytes written and any error
|
||||
// encountered.
|
||||
func writeSample(
|
||||
name string,
|
||||
w enhancedWriter,
|
||||
name, suffix string,
|
||||
metric *dto.Metric,
|
||||
additionalLabelName, additionalLabelValue string,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
value float64,
|
||||
out io.Writer,
|
||||
) (int, error) {
|
||||
var written int
|
||||
n, err := fmt.Fprint(out, name)
|
||||
n, err := w.WriteString(name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = labelPairsToText(
|
||||
metric.Label,
|
||||
additionalLabelName, additionalLabelValue,
|
||||
out,
|
||||
if suffix != "" {
|
||||
n, err = w.WriteString(suffix)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = writeLabelPairs(
|
||||
w, metric.Label, additionalLabelName, additionalLabelValue,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = fmt.Fprintf(out, " %v", value)
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeFloat(w, value)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if metric.TimestampMs != nil {
|
||||
n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeInt(w, *metric.TimestampMs)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = out.Write([]byte{'\n'})
|
||||
written += n
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// labelPairsToText converts a slice of LabelPair proto messages plus the
|
||||
// writeLabelPairs converts a slice of LabelPair proto messages plus the
|
||||
// explicitly given additional label pair into text formatted as required by the
|
||||
// text format and writes it to 'out'. An empty slice in combination with an
|
||||
// empty string 'additionalLabelName' results in nothing being
|
||||
// written. Otherwise, the label pairs are written, escaped as required by the
|
||||
// text format, and enclosed in '{...}'. The function returns the number of
|
||||
// bytes written and any error encountered.
|
||||
func labelPairsToText(
|
||||
// text format and writes it to 'w'. An empty slice in combination with an empty
|
||||
// string 'additionalLabelName' results in nothing being written. Otherwise, the
|
||||
// label pairs are written, escaped as required by the text format, and enclosed
|
||||
// in '{...}'. The function returns the number of bytes written and any error
|
||||
// encountered.
|
||||
func writeLabelPairs(
|
||||
w enhancedWriter,
|
||||
in []*dto.LabelPair,
|
||||
additionalLabelName, additionalLabelValue string,
|
||||
out io.Writer,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
) (int, error) {
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
var written int
|
||||
separator := '{'
|
||||
for _, lp := range in {
|
||||
n, err := fmt.Fprintf(
|
||||
out, `%c%s="%s"`,
|
||||
separator, lp.GetName(), escapeString(lp.GetValue(), true),
|
||||
var (
|
||||
written int
|
||||
separator byte = '{'
|
||||
)
|
||||
for _, lp := range in {
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(lp.GetName())
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = w.WriteString(`="`)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeEscapedString(w, lp.GetValue(), true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
separator = ','
|
||||
}
|
||||
if additionalLabelName != "" {
|
||||
n, err := fmt.Fprintf(
|
||||
out, `%c%s="%s"`,
|
||||
separator, additionalLabelName,
|
||||
escapeString(additionalLabelValue, true),
|
||||
)
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(additionalLabelName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err := out.Write([]byte{'}'})
|
||||
n, err = w.WriteString(`="`)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeFloat(w, additionalLabelValue)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
err := w.WriteByte('}')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
|
||||
// includeDoubleQuote is true - '"' by '\"'.
|
||||
var (
|
||||
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
|
||||
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
|
||||
escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
|
||||
quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
|
||||
)
|
||||
|
||||
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
||||
// includeDoubleQuote is true - '"' by '\"'.
|
||||
func escapeString(v string, includeDoubleQuote bool) string {
|
||||
func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
|
||||
if includeDoubleQuote {
|
||||
return escapeWithDoubleQuote.Replace(v)
|
||||
return quotedEscaper.WriteString(w, v)
|
||||
}
|
||||
return escaper.WriteString(w, v)
|
||||
}
|
||||
|
||||
return escape.Replace(v)
|
||||
// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
|
||||
// a few common cases for increased efficiency. For non-hardcoded cases, it uses
|
||||
// strconv.AppendFloat to avoid allocations, similar to writeInt.
|
||||
func writeFloat(w enhancedWriter, f float64) (int, error) {
|
||||
switch {
|
||||
case f == 1:
|
||||
return 1, w.WriteByte('1')
|
||||
case f == 0:
|
||||
return 1, w.WriteByte('0')
|
||||
case f == -1:
|
||||
return w.WriteString("-1")
|
||||
case math.IsNaN(f):
|
||||
return w.WriteString("NaN")
|
||||
case math.IsInf(f, +1):
|
||||
return w.WriteString("+Inf")
|
||||
case math.IsInf(f, -1):
|
||||
return w.WriteString("-Inf")
|
||||
default:
|
||||
bp := numBufPool.Get().(*[]byte)
|
||||
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
|
||||
written, err := w.Write(*bp)
|
||||
numBufPool.Put(bp)
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
|
||||
// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
|
||||
// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
|
||||
// allocations.
|
||||
func writeInt(w enhancedWriter, i int64) (int, error) {
|
||||
bp := numBufPool.Get().(*[]byte)
|
||||
*bp = strconv.AppendInt((*bp)[:0], i, 10)
|
||||
written, err := w.Write(*bp)
|
||||
numBufPool.Put(bp)
|
||||
return written, err
|
||||
}
|
||||
|
|
|
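The writeFloat and writeInt comments above describe formatting numbers through a scratch byte slice taken from a sync.Pool instead of going through fmt.Fprint; numBufPool itself is not defined in the hunk shown here. A minimal, self-contained sketch of that pattern, in which the pool definition and buffer capacity are assumptions made for illustration rather than the vendored code:

```go
package main

import (
	"bufio"
	"os"
	"strconv"
	"sync"
)

// numBufPool stands in for the pool the vendored writer draws its scratch
// buffers from; the initial capacity is an assumption for this sketch.
var numBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 24)
		return &b
	},
}

// writeFloatLike appends the formatted float to a pooled buffer and writes it,
// avoiding the per-call allocations fmt.Fprint would make.
func writeFloatLike(w *bufio.Writer, f float64) (int, error) {
	bp := numBufPool.Get().(*[]byte)
	*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
	n, err := w.Write(*bp)
	numBufPool.Put(bp)
	return n, err
}

func main() {
	w := bufio.NewWriter(os.Stdout)
	defer w.Flush()
	writeFloatLike(w, 0.00025) // prints 0.00025
	w.WriteByte('\n')
}
```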
@@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn {
// - Other labels have to be added to currentLabels for signature calculation.
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
if p.currentLabelPair.GetName() == model.QuantileLabel {
if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
return nil

@@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn {
// Similar special treatment of histograms.
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
if p.currentLabelPair.GetName() == model.BucketLabel {
if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
return nil

@@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
return nil
}
}

@@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn {
if p.readTokenUntilWhitespace(); p.err != nil {
return nil // Unexpected end of input.
}
value, err := strconv.ParseFloat(p.currentToken.String(), 64)
value, err := parseFloat(p.currentToken.String())
if err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))

@@ -755,3 +755,10 @@ func histogramMetricName(name string) string {
return name
}
}

func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}
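The parseFloat helper added above exists because, from Go 1.13 on, strconv.ParseFloat accepts hexadecimal floats and digit-separating underscores, neither of which is legal in the Prometheus text format. A small sketch of the difference, using illustrative inputs:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFloat mirrors the helper above: reject 'p', 'P', and '_' before
// delegating to strconv.ParseFloat.
func parseFloat(s string) (float64, error) {
	if strings.ContainsAny(s, "pP_") {
		return 0, fmt.Errorf("unsupported character in float")
	}
	return strconv.ParseFloat(s, 64)
}

func main() {
	for _, s := range []string{"0.25", "0x1p-2", "1_000"} {
		v1, err1 := strconv.ParseFloat(s, 64) // Go >= 1.13 accepts all three
		v2, err2 := parseFloat(s)             // the guard rejects the last two
		fmt.Println(s, v1, err1, v2, err2)
	}
}
```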
@@ -0,0 +1,22 @@
module github.com/prometheus/common

require (
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect
github.com/go-kit/kit v0.9.0
github.com/go-logfmt/logfmt v0.4.0 // indirect
github.com/golang/protobuf v1.3.2
github.com/julienschmidt/httprouter v1.2.0
github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.0.0
github.com/prometheus/client_model v0.2.0
github.com/sirupsen/logrus v1.4.2
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect
golang.org/x/sys v0.0.0-20190422165155-953cdadca894
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.2.4
)

go 1.11
6 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go generated vendored
@@ -1,12 +1,12 @@
/*
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
@@ -21,7 +21,6 @@ import (
)

var (
separator = []byte{0}
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
@@ -43,7 +43,7 @@ const (
// (1970-01-01 00:00 UTC) excluding leap seconds.
type Time int64

// Interval describes and interval between two timestamps.
// Interval describes an interval between two timestamps.
type Interval struct {
Start, End Time
}

@@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error {
return err
}

// If the value was something like -0.1 the negative is lost in the
// parsing because of the leading zero, this ensures that we capture it.
if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
*t = Time(v+va) * -1
} else {
*t = Time(v + va)
}

default:
return fmt.Errorf("invalid time %q", string(b))
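The hunk above guards against losing the sign of small negative timestamps: for an input such as "-0.1", the integer part is "-0", which parses to 0, so recombining it with the fractional part would yield a positive value. A simplified, standalone illustration of the failure mode the check prevents (the millisecond scaling here is an assumption for the sketch, not the vendored parser):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// "-0.1" split at the dot: integer part "-0", fractional part "1".
	intPart := "-0"
	v, _ := strconv.ParseInt(intPart, 10, 64) // 0 -- the sign is lost here
	va := int64(100)                          // fraction already scaled to milliseconds (illustrative)
	t := v*1000 + va                          // 100, i.e. +0.1s instead of -0.1s
	if len(intPart) > 0 && intPart[0] == '-' && v+va > 0 {
		t = -t // the patched UnmarshalJSON restores the sign this way
	}
	fmt.Println(t) // -100 milliseconds
}
```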
@@ -1,7 +1,7 @@
# procfs

This procfs package provides functions to retrieve system, kernel and process
metrics from the pseudo-filesystem proc.
This package provides functions to retrieve system, kernel, and process
metrics from the pseudo-filesystems /proc and /sys.

*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.

@@ -9,3 +9,53 @@ backwards-incompatible ways without warnings. Use it at your own risk.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)

## Usage

The procfs library is organized by packages based on whether the gathered data is coming from
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
/sys, or both. For example, cpu statistics are gathered from
`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
point is initialized, and then the stat information is read.

```go
fs, err := procfs.NewFS("/proc")
stats, err := fs.Stat()
```

Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems.

```go
fs, err := blockdevice.NewFS("/proc", "/sys")
stats, err := fs.ProcDiskstats()
```

## Package Organization

The packages in this project are organized according to (1) whether the data comes from the `/proc` or
`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
is available in the `blockdevices` sub-package.

## Building and Testing

The procfs library is intended to be built as part of another application, so there are no distributable binaries.
However, most of the API includes unit tests which can be run with `make test`.

### Updating Test Fixtures

The procfs library includes a set of test fixtures which include many example files from
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
which is extracted automatically during testing. To add/update the test fixtures, first
ensure the `fixtures` directory is up to date by removing the existing directory and then
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.

```bash
rm -rf fixtures
make test
```

Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff fixtures.ttar`.
@@ -0,0 +1,85 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
"fmt"
"io/ioutil"
"net"
"strings"
)

// ARPEntry contains a single row of the columnar data represented in
// /proc/net/arp.
type ARPEntry struct {
// IP address
IPAddr net.IP
// MAC address
HWAddr net.HardwareAddr
// Name of the device
Device string
}

// GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
// and then return a slice of ARPEntry's.
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
}

return parseARPEntries(data)
}

func parseARPEntries(data []byte) ([]ARPEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]ARPEntry, 0)
var err error
const (
expectedDataWidth = 6
expectedHeaderWidth = 9
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)

if width == expectedHeaderWidth || width == 0 {
continue
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
}

}

return entries, err
}

func parseARPEntry(columns []string) (ARPEntry, error) {
ip := net.ParseIP(columns[0])
mac := net.HardwareAddr(columns[3])

entry := ARPEntry{
IPAddr: ip,
HWAddr: mac,
Device: columns[5],
}

return entry, nil
}
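A short sketch of how the new ARP API above might be called from application code, assuming the package is importable as github.com/prometheus/procfs and that /proc is mounted:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		// Each entry corresponds to one data row of /proc/net/arp.
		fmt.Printf("%s dev %s\n", e.IPAddr, e.Device)
	}
}
```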
@@ -31,19 +31,9 @@ type BuddyInfo struct {
Sizes []float64
}

// NewBuddyInfo reads the buddyinfo statistics.
func NewBuddyInfo() ([]BuddyInfo, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}

return fs.NewBuddyInfo()
}

// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.Path("buddyinfo"))
// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil {
return nil, err
}
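The hunk above removes the package-level NewBuddyInfo constructor and renames the method to BuddyInfo, so callers now build an FS explicitly. A hedged sketch of the migrated call site; only the Sizes field is shown in this diff, other BuddyInfo fields are omitted here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Old, removed API: bi, err := procfs.NewBuddyInfo()
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	bi, err := fs.BuddyInfo() // new method name on FS
	if err != nil {
		log.Fatal(err)
	}
	for _, node := range bi {
		fmt.Println(node.Sizes)
	}
}
```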
@@ -0,0 +1,167 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
"bufio"
"bytes"
"strconv"
"strings"

"github.com/prometheus/procfs/internal/util"
)

// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
type CPUInfo struct {
Processor uint
VendorID string
CPUFamily string
Model string
ModelName string
Stepping string
Microcode string
CPUMHz float64
CacheSize string
PhysicalID string
Siblings uint
CoreID string
CPUCores uint
APICID string
InitialAPICID string
FPU string
FPUException string
CPUIDLevel uint
WP string
Flags []string
Bugs []string
BogoMips float64
CLFlushSize uint
CacheAlignment uint
AddressSizes string
PowerManagement string
}

// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
if err != nil {
return nil, err
}
return parseCPUInfo(data)
}

// parseCPUInfo parses data from /proc/cpuinfo
func parseCPUInfo(info []byte) ([]CPUInfo, error) {
cpuinfo := []CPUInfo{}
i := -1
scanner := bufio.NewScanner(bytes.NewReader(info))
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) == "" {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "vendor_id":
cpuinfo[i].VendorID = field[1]
case "cpu family":
cpuinfo[i].CPUFamily = field[1]
case "model":
cpuinfo[i].Model = field[1]
case "model name":
cpuinfo[i].ModelName = field[1]
case "stepping":
cpuinfo[i].Stepping = field[1]
case "microcode":
cpuinfo[i].Microcode = field[1]
case "cpu MHz":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "cache size":
cpuinfo[i].CacheSize = field[1]
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "apicid":
cpuinfo[i].APICID = field[1]
case "initial apicid":
cpuinfo[i].InitialAPICID = field[1]
case "fpu":
cpuinfo[i].FPU = field[1]
case "fpu_exception":
cpuinfo[i].FPUException = field[1]
case "cpuid level":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUIDLevel = uint(v)
case "wp":
cpuinfo[i].WP = field[1]
case "flags":
cpuinfo[i].Flags = strings.Fields(field[1])
case "bugs":
cpuinfo[i].Bugs = strings.Fields(field[1])
case "bogomips":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "clflush size":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CLFlushSize = uint(v)
case "cache_alignment":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CacheAlignment = uint(v)
case "address sizes":
cpuinfo[i].AddressSizes = field[1]
case "power management":
cpuinfo[i].PowerManagement = field[1]
}
}
return cpuinfo, nil

}
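parseCPUInfo above keys each "key: value" line off the field name and starts a new CPUInfo record at every "processor" line. parseCPUInfo itself is unexported, so the following is only a simplified, standalone illustration of that stanza-splitting strategy, using made-up sample data:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Two abbreviated /proc/cpuinfo stanzas (illustrative sample data).
	sample := `processor	: 0
model name	: ACME CPU 3000
cpu MHz		: 2400.000

processor	: 1
model name	: ACME CPU 3000
cpu MHz		: 2400.000
`
	// Like parseCPUInfo, start a new record whenever the key is "processor".
	count := 0
	s := bufio.NewScanner(strings.NewReader(sample))
	for s.Scan() {
		kv := strings.SplitN(s.Text(), ": ", 2)
		if strings.TrimSpace(kv[0]) == "processor" {
			count++
		}
	}
	fmt.Println("processors found:", count) // 2
}
```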
@@ -0,0 +1,153 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
"bufio"
"bytes"
"fmt"
"io"
"strings"

"github.com/prometheus/procfs/internal/util"
)

// Crypto holds info parsed from /proc/crypto.
type Crypto struct {
Alignmask *uint64
Async bool
Blocksize *uint64
Chunksize *uint64
Ctxsize *uint64
Digestsize *uint64
Driver string
Geniv string
Internal string
Ivsize *uint64
Maxauthsize *uint64
MaxKeysize *uint64
MinKeysize *uint64
Module string
Name string
Priority *int64
Refcnt *int64
Seedsize *uint64
Selftest string
Type string
Walksize *uint64
}

// Crypto parses an crypto-file (/proc/crypto) and returns a slice of
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
}

crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
}

return crypto, nil
}

// parseCrypto parses a /proc/crypto stream into Crypto elements.
func parseCrypto(r io.Reader) ([]Crypto, error) {
var out []Crypto

s := bufio.NewScanner(r)
for s.Scan() {
text := s.Text()
switch {
case strings.HasPrefix(text, "name"):
// Each crypto element begins with its name.
out = append(out, Crypto{})
case text == "":
continue
}

kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
}

k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])

// Parse the key/value pair into the currently focused element.
c := &out[len(out)-1]
if err := c.parseKV(k, v); err != nil {
return nil, err
}
}

if err := s.Err(); err != nil {
return nil, err
}

return out, nil
}

// parseKV parses a key/value pair into the appropriate field of c.
func (c *Crypto) parseKV(k, v string) error {
vp := util.NewValueParser(v)

switch k {
case "async":
// Interpret literal yes as true.
c.Async = v == "yes"
case "blocksize":
c.Blocksize = vp.PUInt64()
case "chunksize":
c.Chunksize = vp.PUInt64()
case "digestsize":
c.Digestsize = vp.PUInt64()
case "driver":
c.Driver = v
case "geniv":
c.Geniv = v
case "internal":
c.Internal = v
case "ivsize":
c.Ivsize = vp.PUInt64()
case "maxauthsize":
c.Maxauthsize = vp.PUInt64()
case "max keysize":
c.MaxKeysize = vp.PUInt64()
case "min keysize":
c.MinKeysize = vp.PUInt64()
case "module":
c.Module = v
case "name":
c.Name = v
case "priority":
c.Priority = vp.PInt64()
case "refcnt":
c.Refcnt = vp.PInt64()
case "seedsize":
c.Seedsize = vp.PUInt64()
case "selftest":
c.Selftest = v
case "type":
c.Type = v
case "walksize":
c.Walksize = vp.PUInt64()
}

return vp.Err()
}
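Crypto() above parses the colon-separated records of /proc/crypto, one Crypto value per "name" stanza, and exposes numeric fields as pointers so that keys absent from a record stay nil. A hedged usage sketch, again assuming the github.com/prometheus/procfs import path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range entries {
		// Priority is a pointer: nil means the key was not present.
		if c.Priority != nil {
			fmt.Printf("%s (driver %s, priority %d)\n", c.Name, c.Driver, *c.Priority)
		}
	}
}
```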
@@ -14,69 +14,30 @@
package procfs

import (
"fmt"
"os"
"path"

"github.com/prometheus/procfs/nfs"
"github.com/prometheus/procfs/xfs"
"github.com/prometheus/procfs/internal/fs"
)

// FS represents the pseudo-filesystem proc, which provides an interface to
// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
type FS string
type FS struct {
proc fs.FS
}

// DefaultMountPoint is the common mount point of the proc filesystem.
const DefaultMountPoint = "/proc"
const DefaultMountPoint = fs.DefaultProcMountPoint

// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
func NewDefaultFS() (FS, error) {
return NewFS(DefaultMountPoint)
}

// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
// if the mount point directory can't be read or is a file.
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
fs, err := fs.NewFS(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
return FS{}, err
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}

return FS(mountPoint), nil
}

// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}

// XFSStats retrieves XFS filesystem runtime statistics.
func (fs FS) XFSStats() (*xfs.Stats, error) {
f, err := os.Open(fs.Path("fs/xfs/stat"))
if err != nil {
return nil, err
}
defer f.Close()

return xfs.ParseStats(f)
}

// NFSClientRPCStats retrieves NFS client RPC statistics.
func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfs"))
if err != nil {
return nil, err
}
defer f.Close()

return nfs.ParseClientRPCStats(f)
}

// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfsd"))
if err != nil {
return nil, err
}
defer f.Close()

return nfs.ParseServerRPCStats(f)
return FS{fs}, nil
}
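The rewritten fs.go above replaces the string-based FS with a struct wrapping internal/fs.FS and drops the XFS/NFS helpers from the root package. A hedged sketch of constructing the new FS, matching the README usage earlier in this diff; Stat() is taken from that README example and its exact fields are not shown here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS is shorthand for NewFS(procfs.DefaultMountPoint).
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.Stat() // as in the README example above
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", stat)
}
```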
@@ -0,0 +1,9 @@
module github.com/prometheus/procfs

go 1.12

require (
github.com/google/go-cmp v0.3.1
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
)
@@ -0,0 +1,55 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import (
"fmt"
"os"
"path/filepath"
)

const (
// DefaultProcMountPoint is the common mount point of the proc filesystem.
DefaultProcMountPoint = "/proc"

// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"

// DefaultConfigfsMountPoint is the common mount point of the configfs
DefaultConfigfsMountPoint = "/sys/kernel/config"
)

// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
// interface to kernel data structures.
type FS string

// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}

return FS(mountPoint), nil
}

// Path appends the given path elements to the filesystem path, adding separators
// as necessary.
func (fs FS) Path(p ...string) string {
return filepath.Join(append([]string{string(fs)}, p...)...)
}
@@ -13,7 +13,11 @@

package util

import "strconv"
import (
"io/ioutil"
"strconv"
"strings"
)

// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {

@@ -44,3 +48,41 @@ func ParseUint64s(ss []string) ([]uint64, error) {

return us, nil
}

// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
func ParsePInt64s(ss []string) ([]*int64, error) {
us := make([]*int64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return nil, err
}

us = append(us, &u)
}

return us, nil
}

// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
var truth bool
switch b {
case "enabled":
truth = true
case "disabled":
truth = false
default:
return nil
}
return &truth
}
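ParseBool above returns a *bool rather than a bool so callers can tell "enabled"/"disabled" apart from any other value, which maps to nil. A standalone illustration of that three-state behaviour; the function body is copied from the hunk, while the sample inputs are made up:

```go
package main

import "fmt"

// ParseBool parses a string into a boolean pointer (copied from the hunk above).
func ParseBool(b string) *bool {
	var truth bool
	switch b {
	case "enabled":
		truth = true
	case "disabled":
		truth = false
	default:
		return nil
	}
	return &truth
}

func main() {
	for _, s := range []string{"enabled", "disabled", "unsupported"} {
		if v := ParseBool(s); v != nil {
			fmt.Println(s, "->", *v)
		} else {
			fmt.Println(s, "-> unknown")
		}
	}
}
```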