diff --git a/vendor.conf b/vendor.conf index 7bb7e0d971..866e74a689 100755 --- a/vendor.conf +++ b/vendor.conf @@ -2,36 +2,37 @@ cloud.google.com/go 0ebda48a7f143b1cce9eb37a8c11 github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403 github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1 +github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1 github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0 -github.com/containerd/containerd c80284d4b5291a351bb471bcdabb5c1d95e7a583 # master / v1.4.0-dev +github.com/containerd/containerd e9f94064b9616ab36a8a51d632a63f97f7783c3d # v1.4.0-rc.1 github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165 -github.com/containerd/cgroups 44306b6a1d46985d916b48b4199f93a378af314f +github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12 github.com/cpuguy83/go-md2man/v2 f79a8a8ca69da163eee19ab442bedad7a35bba5a # v2.0.0 github.com/creack/pty 3a6a957789163cacdfe0e291617a1c8e80612c11 # v1.1.9 github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1 github.com/docker/compose-on-kubernetes 78e6a00beda64ac8ccb9fec787e601fe2ce0d5bb # v0.5.0-alpha1 github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 -github.com/docker/docker 0f41a77c6993ade605a639fb25994cfe5e1b3fe8 +github.com/docker/docker f50a40e889fdaeebf14fce1d494f95e60092d21d github.com/docker/docker-credential-helpers 54f0238b6bf101fc3ad3b34114cb5520beb562f5 # v0.6.3 github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 # Contains a customized version of canonical/json and is used by Notary. The package is periodically rebased on current Go versions. 
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0 github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f -github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 +github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1 github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0 -github.com/docker/swarmkit 035d564a3686f5e348d861ec0c074ff26854c498 +github.com/docker/swarmkit d6592ddefd8a5319aadff74c558b816b1a0b2590 github.com/evanphx/json-patch 72bf35d0ff611848c1dc9df0f976c81192392fa5 # v4.1.0 github.com/fvbommel/sortorder a1ddee917217bbd0691affdca9c88d24d3f5c27d # v1.0.1 github.com/gofrs/flock 392e7fae8f1b0bdbd67dad7237d23f618feb6dbb # v0.7.1 github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2 github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1 github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998 -github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3 +github.com/golang/protobuf 84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5 github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0 github.com/google/gofuzz 24818f796faf91cd76ec7bddd72458fbced7a6c1 github.com/google/shlex e7afc7fbc51079733e9468cdfd1efcd7d196cd1d github.com/googleapis/gnostic 7c663266750e7d82587642f65e60bc4083f1f84e # v0.2.0 -github.com/gorilla/mux 75dcda0896e109a2a22c9315bca3bb21b87b2ba5 # v1.7.4 +github.com/gorilla/mux 98cb6bf42e086f6af920b965c38cacc07402d51b # v1.8.0 github.com/grpc-ecosystem/go-grpc-middleware 3c51f7f332123e8be5a157c0802a228ac85bf9db # v1.2.0 github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 @@ -54,13 +55,13 @@ github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197 github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b # v1.0.0 github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf # v1.0.0-rc1 github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1 -github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10 +github.com/opencontainers/runc ff819c7e9184c13b7c2607fe6c30ae19403a7aff # v1.0.0-rc92 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465 # v0.9.1 -github.com/prometheus/client_golang c5b7fccd204277076155f10851dad72b76a49317 # v0.8.0 -github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2 -github.com/prometheus/common 7600349dcfe1abd18d72d3a1770870d9800a7801 -github.com/prometheus/procfs 7d6f385de8bea29190f15ba9931442a0eaef9af7 +github.com/prometheus/client_golang 6edbbd9e560190e318cdc5b4d3e630b442858380 # v1.6.0 +github.com/prometheus/client_model 7bc5445566f0fe75b15de23e6b93886e982d7bf9 # v0.2.0 +github.com/prometheus/common d978bcb1309602d68bb4ba69cf3f8ed900e07308 # v0.9.1 +github.com/prometheus/procfs 46159f73e74d1cb8dc223deef9b2d049286f46b1 # v0.0.11 github.com/russross/blackfriday/v2 d3b5b032dc8e8927d31a5071b56e14c89f045135 # v2.0.1 github.com/shurcooL/sanitized_anchor_name 7bfe4c7ecddb3666a94b053b422cdd8f5aaa3615 # v1.0.0 github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0 @@ -73,11 +74,11 @@ github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e github.com/xeipuuv/gojsonpointer 02993c407bfbf5f6dae44c4f4b1cf6a39b5fc5bb github.com/xeipuuv/gojsonreference 
bd5ef7bd5415a7ac448318e64f11a24cd21e594b github.com/xeipuuv/gojsonschema 82fcdeb203eb6ab2a67d0a623d9c19e5e5a64927 # v1.2.0 -golang.org/x/crypto 2aa609cf4a9d7d1126360de73b55b6002f9e052a -golang.org/x/net 0de0cce0169b09b364e001f108dc0399ea8630b3 +golang.org/x/crypto 75b288015ac94e66e3d6715fb68a9b41bf046ec2 +golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7 golang.org/x/oauth2 bf48bf16ab8d622ce64ec6ce98d2c98f916b6303 golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb -golang.org/x/sys 85ca7c5b95cdf1e557abb38a283d1e61a5959c31 +golang.org/x/sys ed371f2e16b4b305ee99df548828de367527b76b golang.org/x/text 23ae387dee1f90d29a23c0e87ee0b46038fbed0e # v0.3.3 golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82 google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8 diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 0000000000..24b53065f4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 0000000000..2fd8693c21 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. 
+You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 0000000000..49f67608bf --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 0000000000..db0b35fbe3 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
+ copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 0000000000..ad14b807f4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 0000000000..d580e32aed --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. 
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 0000000000..4a5a821603 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 0000000000..fc9bea7a31 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 0000000000..53bf76efbc --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md index 7fe2b1a17a..d4b09f3d61 100644 --- a/vendor/github.com/containerd/cgroups/README.md +++ b/vendor/github.com/containerd/cgroups/README.md @@ -1,6 +1,6 @@ # cgroups -[![Build Status](https://travis-ci.org/containerd/cgroups.svg?branch=master)](https://travis-ci.org/containerd/cgroups) +[![Build Status](https://github.com/containerd/cgroups/workflows/CI/badge.svg)](https://github.com/containerd/cgroups/actions?query=workflow%3ACI) [![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups) [![GoDoc](https://godoc.org/github.com/containerd/cgroups?status.svg)](https://godoc.org/github.com/containerd/cgroups) [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/cgroups)](https://goreportcard.com/report/github.com/containerd/cgroups) @@ -65,7 +65,7 @@ To update the resources applied in the cgroup ```go shares = uint64(200) if err := control.Update(&specs.LinuxResources{ - CPU: &specs.CPU{ + CPU: &specs.LinuxCPU{ Shares: &shares, }, }); err != nil { @@ -112,6 +112,27 @@ err := control.MoveTo(destination) subCgroup, err := control.New("child", resources) ``` +### Registering for memory events + +This allows you to get notified by an eventfd for v1 memory cgroups events. + +```go +event := cgroups.MemoryThresholdEvent(50 * 1024 * 1024, false) +efd, err := control.RegisterMemoryEvent(event) +``` + +```go +event := cgroups.MemoryPressureEvent(cgroups.MediumPressure, cgroups.DefaultMode) +efd, err := control.RegisterMemoryEvent(event) +``` + +```go +efd, err := control.OOMEventFD() +// or by using RegisterMemoryEvent +event := cgroups.OOMEvent() +efd, err := control.RegisterMemoryEvent(event) +``` + ### Attention All static path should not include `/sys/fs/cgroup/` prefix, it should start with your own cgroups name diff --git a/vendor/github.com/containerd/cgroups/go.mod b/vendor/github.com/containerd/cgroups/go.mod index e6257f69ea..64be56187e 100644 --- a/vendor/github.com/containerd/cgroups/go.mod +++ b/vendor/github.com/containerd/cgroups/go.mod @@ -3,17 +3,16 @@ module github.com/containerd/cgroups go 1.13 require ( - github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3 + github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775 github.com/coreos/go-systemd/v22 v22.0.0 github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/docker/go-units v0.4.0 github.com/godbus/dbus/v5 v5.0.3 github.com/gogo/protobuf v1.3.1 - github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 + github.com/opencontainers/runtime-spec v1.0.2 github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.4.2 + github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.2.2 github.com/urfave/cli v1.22.2 - golang.org/x/sys v0.0.0-20200120151820-655fe14d7479 + golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 ) diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index de8adfb631..a973d5156e 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -154,7 
+154,7 @@ Taking a container object and turning it into a runnable process on a system is ```go // create a new task -task, err := redis.NewTask(context, cio.Stdio) +task, err := redis.NewTask(context, cio.NewCreator(cio.WithStdio)) defer task.Delete(context) // the task is now running and has a pid that can be use to setup networking @@ -184,7 +184,7 @@ checkpoint, err := client.Pull(context, "myregistry/checkpoints/redis:master") redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint)) defer container.Delete(context) -task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint)) +task, err = redis.NewTask(context, cio.NewCreator(cio.WithStdio), containerd.WithTaskCheckpoint(checkpoint)) defer task.Delete(context) err := task.Start(context) diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index ffa8970a89..db65a726b9 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -74,8 +74,8 @@ func getCPUInfo(pattern string) (info string, err error) { } func getCPUVariant() string { - if runtime.GOOS == "windows" { - // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use // runtime.GOARCH to determine the variants var variant string switch runtime.GOARCH { diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index d5c01b0cce..aee8ad2415 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -1,102 +1,101 @@ -github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1 -github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1 -github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1 +github.com/beorn7/perks v1.0.1 +github.com/BurntSushi/toml v0.3.1 +github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/btrfs 153935315f4ab9be5bf03650a1341454b05efa5d -github.com/containerd/cgroups b4448137398923af7f4918b8b2ad8249172ca7a6 -github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0 -github.com/containerd/continuity 0ec596719c75bfd42908850990acea594b7593ac -github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13 -github.com/containerd/go-runc a5c2862aed5e6358b305b0e16bfce58e0549b1cd -github.com/containerd/ttrpc 72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1 -github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1 -github.com/coreos/go-systemd/v22 2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0 -github.com/cpuguy83/go-md2man 7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10 +github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff +github.com/containerd/console v1.0.0 +github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165 +github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf +github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c +github.com/containerd/ttrpc v1.0.1 +github.com/containerd/typeurl v1.0.1 +github.com/coreos/go-systemd/v22 v22.1.0 +github.com/cpuguy83/go-md2man/v2 v2.0.0 github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f 
-github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1 -github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0 -github.com/godbus/dbus/v5 37bf87eef99d69c4f1d3528bd66e3a87dc201472 # v5.0.3 -github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2 -github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1 -github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3 -github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0 -github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1 -github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0 -github.com/hashicorp/errwrap 8a6fb523712970c966eefc6b39ed2c5e74880354 # v1.0.0 -github.com/hashicorp/go-multierror 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0 -github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3 -github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7 -github.com/konsorten/go-windows-terminal-sequences edb144dfd453055e1e49a3d8b410a660b5a87613 # v1.0.3 -github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1 -github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14 -github.com/Microsoft/hcsshim 5bc557dd210ff2caf615e6e22d398123de77fc11 # v0.8.9 -github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 -github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1 -github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10 -github.com/opencontainers/runtime-spec c4ee7d12c742ffe806cd9350b6af3b4b19faed6f # v1.0.2 -github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465 # v0.9.1 -github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0 -github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0 -github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0 -github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8 -github.com/russross/blackfriday 05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2 -github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0 +github.com/docker/go-metrics v0.0.1 +github.com/docker/go-units v0.4.0 +github.com/godbus/dbus/v5 v5.0.3 +github.com/gogo/googleapis v1.3.2 +github.com/gogo/protobuf v1.3.1 +github.com/golang/protobuf v1.3.5 +github.com/google/go-cmp v0.2.0 +github.com/google/uuid v1.1.1 +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 +github.com/hashicorp/errwrap v1.0.0 +github.com/hashicorp/go-multierror v1.0.0 +github.com/hashicorp/golang-lru v0.5.3 +github.com/imdario/mergo v0.3.7 +github.com/konsorten/go-windows-terminal-sequences v1.0.3 +github.com/matttproud/golang_protobuf_extensions v1.0.1 +github.com/Microsoft/go-winio v0.4.14 +github.com/Microsoft/hcsshim v0.8.9 +github.com/opencontainers/go-digest v1.0.0 +github.com/opencontainers/image-spec v1.0.1 +github.com/opencontainers/runc v1.0.0-rc92 +github.com/opencontainers/runtime-spec 4d89ac9fbff6c455f46a5bb59c6b1bb7184a5e43 # v1.0.3-0.20200728170252-4d89ac9fbff6 +github.com/pkg/errors v0.9.1 +github.com/prometheus/client_golang v1.6.0 +github.com/prometheus/client_model v0.2.0 +github.com/prometheus/common v0.9.1 +github.com/prometheus/procfs v0.0.11 +github.com/russross/blackfriday/v2 v2.0.1 +github.com/shurcooL/sanitized_anchor_name v1.0.0 +github.com/sirupsen/logrus 
v1.6.0 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 -github.com/urfave/cli bfe2e925cfb6d44b40ad3a779165ea7e8aff9212 # v1.22.0 -go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3 -go.opencensus.io 9c377598961b706d1542bd2d84d538b5094d596e # v0.22.0 -golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 +github.com/urfave/cli v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092 +go.etcd.io/bbolt v1.3.5 +go.opencensus.io v0.22.0 +golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e -golang.org/x/sys 5c8b2ff67527cb88b770f693cebf3799036d8bc0 -golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 +golang.org/x/sys ed371f2e16b4b305ee99df548828de367527b76b +golang.org/x/text v0.3.3 google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a -google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1 -gotest.tools/v3 bb0d8a963040ea5048dcef1a14d8f8b58a33d4b3 # v3.0.2 +google.golang.org/grpc v1.27.1 +gotest.tools/v3 v3.0.2 # cgroups dependencies -github.com/cilium/ebpf 4032b1d8aae306b7bb94a2a11002932caf88c644 +github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28 # cri dependencies -github.com/containerd/cri 65830369b6b2b4edc454bf5cebbd9b76c1c1ac66 # master -github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1 -github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 +github.com/containerd/cri 4e6644c8cf7fb825f62e0007421b7d83dfeab5a1 # master +github.com/davecgh/go-spew v1.1.1 github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 -github.com/emicklei/go-restful b993709ae1a4f6dd19cfa475232614441b11c9d5 # v2.9.5 -github.com/google/gofuzz db92cf7ae75e4a7a28abc005addab2b394362888 # v1.1.0 -github.com/json-iterator/go 03217c3e97663914aec3faafde50d081f197a0a2 # v1.1.8 -github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3 -github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1 -github.com/opencontainers/selinux 0d49ba2a6aae052c614dfe5de62a158711a6c461 # 1.5.1 -github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1 -github.com/stretchr/testify 221dbe5ed46703ee255b1da0dec05086f5035f62 # v1.4.0 -github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6 -golang.org/x/crypto bac4c82f69751a6dd76e702d54b3ceb88adab236 -golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33 -golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef -gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1 -gopkg.in/yaml.v2 53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8 -k8s.io/api d2dce8e1788e4be2be3a62b6439b3eaa087df0df # v0.18.0 -k8s.io/apimachinery 105e0c6d63f10531ed07f3b5a2195771a0fa444b # v0.18.0 -k8s.io/apiserver 5c8e895629a454efd75a453d1dea5b8142db0013 # v0.18.0 -k8s.io/client-go 0b19784585bd0a0ee5509855829ead81feaa2bdc # v0.18.0 -k8s.io/cri-api 3d1680d8d202aa12c5dc5689170c3c03a488d35b # v0.18.0 -k8s.io/klog 2ca9ad30301bf30a8a6e0fa2110db6b8df699a91 # v1.0.0 -k8s.io/kubernetes 9e991415386e4cf155a24b1da15becaa390438d8 # v1.18.0 -k8s.io/utils a9aa75ae1b89e1b992c33383f48e942d97e52dae -sigs.k8s.io/structured-merge-diff/v3 877aee05330847a873a1a8998b40e12a1e0fde25 # v3.0.0 -sigs.k8s.io/yaml 9fc95527decd95bb9d28cc2eab08179b2d0f6971 # v1.2.0 +github.com/emicklei/go-restful v2.9.5 
+github.com/go-logr/logr v0.2.0 +github.com/google/gofuzz v1.1.0 +github.com/json-iterator/go v1.1.10 +github.com/modern-go/concurrent 1.0.3 +github.com/modern-go/reflect2 v1.0.1 +github.com/opencontainers/selinux v1.6.0 +github.com/tchap/go-patricia v2.2.6 +github.com/willf/bitset d5bec3311243426a3c6d1b7a795f24b17c686dbb # 1.1.10+ used by selinux pkg +golang.org/x/crypto 75b288015ac94e66e3d6715fb68a9b41bf046ec2 +golang.org/x/oauth2 858c2ad4c8b6c5d10852cb89079f6ca1c7309787 +golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82 +gopkg.in/inf.v0 v0.9.1 +gopkg.in/yaml.v2 v2.2.8 +k8s.io/api v0.19.0-rc.4 +k8s.io/apimachinery v0.19.0-rc.4 +k8s.io/apiserver v0.19.0-rc.4 +k8s.io/client-go v0.19.0-rc.4 +k8s.io/cri-api v0.19.0-rc.4 +k8s.io/klog/v2 v2.2.0 +k8s.io/utils 2df71ebbae66f39338aed4cd0bb82d2212ee33cc +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 +sigs.k8s.io/yaml v1.2.0 # cni dependencies -github.com/containerd/go-cni 0d360c50b10b350b6bb23863fd4dfb1c232b01c9 -github.com/containernetworking/cni 4cfb7b568922a3c79a23e438dc52fe537fc9687e # v0.7.1 -github.com/containernetworking/plugins 9f96827c7cabb03f21d86326000c00f61e181f6a # v0.7.6 -github.com/fsnotify/fsnotify 4bf2d1fec78374803a39307bfb8d340688f4f28e # v1.4.8 +github.com/containerd/go-cni v1.0.0 +github.com/containernetworking/cni v0.7.1 +github.com/containernetworking/plugins v0.7.6 +github.com/fsnotify/fsnotify v1.4.9 # image decrypt depedencies -github.com/containerd/imgcrypt 9e761ccd6069fb707ec9493435f31475b5524b38 # v1.0.1 -github.com/containers/ocicrypt 0343cc6053fd65069df55bce6838096e09b4033a # v1.0.1 from containerd/imgcrypt -github.com/fullsailor/pkcs7 8306686428a5fe132eac8cb7c4848af725098bd4 # from containers/ocicrypt -gopkg.in/square/go-jose.v2 730df5f748271903322feb182be83b43ebbbe27d # v2.3.1 from containers/ocicrypt +github.com/containerd/imgcrypt v1.0.1 +github.com/containers/ocicrypt v1.0.1 +github.com/fullsailor/pkcs7 8306686428a5fe132eac8cb7c4848af725098bd4 +gopkg.in/square/go-jose.v2 v2.3.1 # zfs dependencies github.com/containerd/zfs 9abf673ca6ff9ab8d9bd776a4ceff8f6dc699c3d diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 027c6edb72..aa8fba8154 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,6 +1,8 @@ package events // import "github.com/docker/docker/api/types/events" const ( + // BuilderEventType is the event type that the builder generates + BuilderEventType = "builder" // ContainerEventType is the event type that containers generate ContainerEventType = "container" // DaemonEventType is the event type that daemon generate diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 32202ecc6c..af5e1c0bc2 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -5,6 +5,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" ) // DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) @@ -75,4 +76,5 @@ type ContainerSpec struct { Sysctls map[string]string `json:",omitempty"` CapabilityAdd []string `json:",omitempty"` CapabilityDrop []string `json:",omitempty"` + Ulimits []*units.Ulimit `json:",omitempty"` } diff --git 
a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index ee15a46ed0..813eac2c9e 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -134,8 +134,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // Don't decorate context sentinel errors; users may be comparing to // them directly. - switch err { - case context.Canceled, context.DeadlineExceeded: + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return serverResp, err } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 3d64c1b46b..5defe6459f 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -9,7 +9,6 @@ import ( "os" "path/filepath" "strconv" - "strings" "sync" "syscall" @@ -107,14 +106,14 @@ func accessible(isOwner, isGroup bool, perms os.FileMode) bool { // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { +func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) + usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + usr, err = getentUser(name) if err != nil { return user.User{}, err } @@ -130,11 +129,11 @@ func LookupUID(uid int) (user.User, error) { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) + return getentUser(strconv.Itoa(uid)) } -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) +func getentUser(name string) (user.User, error) { + reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } @@ -143,21 +142,21 @@ func getentUser(args string) (user.User, error) { return user.User{}, err } if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(groupname string) (user.Group, error) { +func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(groupname) + group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) + return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, @@ -169,11 +168,11 @@ func LookupGID(gid int) (user.Group, error) { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group 
dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) + return getentGroup(strconv.Itoa(gid)) } -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) +func getentGroup(name string) (user.Group, error) { + reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } @@ -182,18 +181,18 @@ func getentGroup(args string) (user.Group, error) { return user.Group{}, err } if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } -func callGetent(args string) (io.Reader, error) { +func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { - return nil, fmt.Errorf("") + return nil, fmt.Errorf("unable to find getent command") } - out, err := execCmd(getentCmd, args) + out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { @@ -203,8 +202,7 @@ func callGetent(args string) (io.Reader, error) { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: @@ -235,19 +233,19 @@ func lazyChown(p string, uid, gid int, stat *system.StatT) error { // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func NewIdentityMapping(username string) (*IdentityMapping, error) { - usr, err := LookupUser(username) +func NewIdentityMapping(name string) (*IdentityMapping, error) { + usr, err := LookupUser(name) if err != nil { - return nil, fmt.Errorf("Could not get user for username %s: %v", username, err) + return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } uid := strconv.Itoa(usr.Uid) - subuidRangesWithUserName, err := parseSubuid(username) + subuidRangesWithUserName, err := parseSubuid(name) if err != nil { return nil, err } - subgidRangesWithUserName, err := parseSubgid(username) + subgidRangesWithUserName, err := parseSubgid(name) if err != nil { return nil, err } @@ -265,10 +263,10 @@ func NewIdentityMapping(username string) (*IdentityMapping, error) { subgidRanges := append(subgidRangesWithUserName, subgidRangesWithUID...) 
if len(subuidRanges) == 0 { - return nil, errors.Errorf("no subuid ranges found for user %q", username) + return nil, errors.Errorf("no subuid ranges found for user %q", name) } if len(subgidRanges) == 0 { - return nil, errors.Errorf("no subgid ranges found for user %q", username) + return nil, errors.Errorf("no subgid ranges found for user %q", name) } return &IdentityMapping{ diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index 6272c5a404..bf7ae0564b 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -17,18 +17,13 @@ import ( var ( once sync.Once userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) +) + +const ( // default length for a UID/GID subordinate range defaultRangeLen = 65536 defaultRangeStart = 100000 - userMod = "usermod" ) // AddNamespaceRangesUser takes a username and uses the standard system @@ -67,7 +62,7 @@ func AddNamespaceRangesUser(name string) (int, int, error) { return uid, gid, nil } -func addUser(userName string) error { +func addUser(name string) error { once.Do(func() { // set up which commands are used for adding users/groups dependent on distro if _, err := resolveBinary("adduser"); err == nil { @@ -76,13 +71,18 @@ func addUser(userName string) error { userCommand = "useradd" } }) - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + var args []string + switch userCommand { + case "adduser": + args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name} + case "useradd": + args = []string{"-r", "-s", "/bin/false", name} + default: + return fmt.Errorf("cannot add user; no useradd/adduser binary found") } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) - if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + + if out, err := execCmd(userCommand, args...); err != nil { + return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out)) } return nil } @@ -101,7 +101,7 @@ func createSubordinateRanges(name string) error { if err != nil { return fmt.Errorf("Can't find available subuid range: %v", err) } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) if err != nil { return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) } @@ -117,7 +117,7 @@ func createSubordinateRanges(name string) error { if err != nil { return fmt.Errorf("Can't find available subgid range: %v", err) } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) if err != nil { return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) } diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go 
b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go index bcf6a4ffbc..1e2d4a7a75 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -6,7 +6,6 @@ import ( "fmt" "os/exec" "path/filepath" - "strings" ) func resolveBinary(binname string) (string, error) { @@ -26,7 +25,7 @@ func resolveBinary(binname string) (string, error) { return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) } -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) +func execCmd(cmd string, arg ...string) ([]byte, error) { + execCmd := exec.Command(cmd, arg...) return execCmd.CombinedOutput() } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 0000000000..6a51ccd642 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf index 55bb49e8cc..25d0928580 100644 --- a/vendor/github.com/docker/docker/vendor.conf +++ b/vendor/github.com/docker/docker/vendor.conf @@ -4,7 +4,7 @@ github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/golang/gddo 72a348e765d293ed6d1ded7b699591f14d6cd921 github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1 -github.com/gorilla/mux 75dcda0896e109a2a22c9315bca3bb21b87b2ba5 # v1.7.4 +github.com/gorilla/mux 98cb6bf42e086f6af920b965c38cacc07402d51b # v1.8.0 github.com/Microsoft/opengcs a10967154e143a36014584a6f664344e3bb0aa64 github.com/moby/term 73f35e472e8f0a3f91347164138ce6bd73b756a9 @@ -12,8 +12,8 @@ github.com/creack/pty 3a6a957789163cacdfe0e291617a github.com/konsorten/go-windows-terminal-sequences edb144dfd453055e1e49a3d8b410a660b5a87613 # v1.0.3 github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0 github.com/tchap/go-patricia a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0 -golang.org/x/net 0de0cce0169b09b364e001f108dc0399ea8630b3 -golang.org/x/sys 85ca7c5b95cdf1e557abb38a283d1e61a5959c31 +golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7 +golang.org/x/sys ed371f2e16b4b305ee99df548828de367527b76b github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0 github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0 github.com/moby/sys 6154f11e6840c0d6b0dbb23f4125a6134b3013c9 # mountinfo/v0.1.3 @@ -52,7 +52,7 @@ github.com/hashicorp/go-sockaddr c7188e74f6acae5a989bdc959aa7 github.com/hashicorp/go-multierror 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0 github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 github.com/docker/libkv 458977154600b9f23984d9f4b82e79570b5ae12b -github.com/vishvananda/netns 0a2b9b5464df8343199164a0321edf3313202f7e +github.com/vishvananda/netns db3c7e526aae966c4ccfa6c8189b693d6ac5d202 github.com/vishvananda/netlink f049be6f391489d3f374498fe0c8df8449258372 # v1.1.0 github.com/moby/ipvs 
4566ccea0e08d68e9614c3e7a64a23b850c4bb35 # v1.0.1 @@ -66,7 +66,7 @@ github.com/ugorji/go b4c50a2b199d93b13dc15e78929c github.com/hashicorp/consul 9a9cc9341bb487651a0399e3fc5e1e8a42e62dd9 # v0.5.2 github.com/miekg/dns 6c0c4e6581f8e173cc562c8b3363ab984e4ae071 # v1.1.27 github.com/ishidawataru/sctp 6e2cb1366111dcf547c13531e3a263a067715847 -go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3 +go.etcd.io/bbolt 232d8fc87f50244f9c808f4745759e08a304c029 # v1.3.5 # get graph and distribution packages github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 @@ -83,10 +83,11 @@ google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfc # the containerd project first, and update both after that is merged. # This commit does not need to match RUNC_COMMIT as it is used for helper # packages but should be newer or equal. -github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10 -github.com/opencontainers/runtime-spec c4ee7d12c742ffe806cd9350b6af3b4b19faed6f # v1.0.2 +github.com/opencontainers/runc ff819c7e9184c13b7c2607fe6c30ae19403a7aff # v1.0.0-rc92 +github.com/opencontainers/runtime-spec 4d89ac9fbff6c455f46a5bb59c6b1bb7184a5e43 # v1.0.3-0.20200728170252-4d89ac9fbff6 github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1 github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1 +github.com/cyphar/filepath-securejoin a261ee33d7a517f054effbf451841abaafe3e0fd # v0.2.2 # go-systemd v17 is required by github.com/coreos/pkg/capnslog/journald_formatter.go github.com/coreos/go-systemd 39ca1b05acc7ad1220e09f133283b8859a8b71ab # v17 @@ -122,25 +123,25 @@ github.com/googleapis/gax-go 317e0006254c44a0ac427cc52a0e google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8 # containerd -github.com/containerd/containerd c80284d4b5291a351bb471bcdabb5c1d95e7a583 # master / v1.4.0-dev -github.com/containerd/fifo ff969a566b00877c63489baf6e8c35d60af6142c -github.com/containerd/continuity 26c1120b8d4107d2471b93ad78ef7ce1fc84c4c4 -github.com/containerd/cgroups 44306b6a1d46985d916b48b4199f93a378af314f +github.com/containerd/containerd e9f94064b9616ab36a8a51d632a63f97f7783c3d # v1.4.0-rc.1 +github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf +github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165 +github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0 github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1 github.com/containerd/ttrpc 72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1 github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2 -github.com/cilium/ebpf 60c3aa43f488292fe2ee50fb8b833b383ca8ebbb +github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28 # cluster -github.com/docker/swarmkit 035d564a3686f5e348d861ec0c074ff26854c498 +github.com/docker/swarmkit d6592ddefd8a5319aadff74c558b816b1a0b2590 github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1 -github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3 +github.com/golang/protobuf 84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5 github.com/cloudflare/cfssl 5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2 github.com/fernet/fernet-go 9eac43b88a5efb8651d24de9b68e87567e029736 github.com/google/certificate-transparency-go 
37a384cd035e722ea46e55029093e26687138edf # v1.0.20 -golang.org/x/crypto 2aa609cf4a9d7d1126360de73b55b6002f9e052a +golang.org/x/crypto 75b288015ac94e66e3d6715fb68a9b41bf046ec2 golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82 github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git @@ -149,11 +150,11 @@ github.com/coreos/pkg 3ac0863d7acf3bc44daf49afef89 code.cloudfoundry.org/clock 02e53af36e6c978af692887ed449b74026d76fec # v1.0.0 # prometheus -github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0 +github.com/prometheus/client_golang 6edbbd9e560190e318cdc5b4d3e630b442858380 # v1.6.0 github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1 -github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0 -github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0 -github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8 +github.com/prometheus/client_model 7bc5445566f0fe75b15de23e6b93886e982d7bf9 # v0.2.0 +github.com/prometheus/common d978bcb1309602d68bb4ba69cf3f8ed900e07308 # v0.9.1 +github.com/prometheus/procfs 46159f73e74d1cb8dc223deef9b2d049286f46b1 # v0.0.11 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1 github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465 # v0.9.1 github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0 diff --git a/vendor/github.com/docker/go-metrics/LICENSE.code b/vendor/github.com/docker/go-metrics/LICENSE similarity index 100% rename from vendor/github.com/docker/go-metrics/LICENSE.code rename to vendor/github.com/docker/go-metrics/LICENSE diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md index fdf7fb746f..a9e947cb56 100644 --- a/vendor/github.com/docker/go-metrics/README.md +++ b/vendor/github.com/docker/go-metrics/README.md @@ -68,9 +68,21 @@ If you need to use a unit but it is not defined in the package please open a PR Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics). +## HTTP Metrics + +To instrument a http handler, you can wrap the code like this: + +```go +namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"}) +httpMetrics := namespace.NewDefaultHttpMetrics() +metrics.Register(namespace) +instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler) +``` +Note: The `handler` label must be provided when a new namespace is created. + ## Additional Metrics -Additional metrics are also defined here that are not avaliable in the prometheus client. +Additional metrics are also defined here that are not available in the prometheus client. If you need a custom metrics and it is generic enough to be used by multiple projects, define it here. 
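
Reviewer note on the new go-metrics HTTP instrumentation pulled in by this bump (see the handler.go and namespace.go hunks below): the vendored README snippet above calls `NewDefaultHttpMetrics()` with no argument, while the function added in namespace.go takes a `handlerName` string. The sketch below is not part of this change; it is a minimal illustration, following the vendored code rather than the README, of how a consumer might wire the new API together. The namespace, handler name, route, and port are made up for the example.

```go
package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	// Each collector built below carries a constant "handler" label,
	// added by the namespace.go constructors introduced in this diff.
	ns := metrics.NewNamespace("example", "http", metrics.Labels{})
	httpMetrics := ns.NewDefaultHttpMetrics("hello") // in-flight, total, duration, sizes
	metrics.Register(ns)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello\n"))
	})

	// InstrumentHandler (handler.go) chains the promhttp middleware that
	// matches each HTTPMetric's handlerType around the wrapped handler.
	http.Handle("/hello", metrics.InstrumentHandler(httpMetrics, hello))

	// Handler() now returns the plain promhttp handler; per the diff it is
	// no longer instrumented itself.
	http.Handle("/metrics", metrics.Handler())
	http.ListenAndServe(":8080", nil)
}
```

The namespace must be registered (as above) for the wrapped handler's histograms and counters to show up on the `/metrics` endpoint.
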
diff --git a/vendor/github.com/docker/go-metrics/go.mod b/vendor/github.com/docker/go-metrics/go.mod new file mode 100644 index 0000000000..7e328f0cff --- /dev/null +++ b/vendor/github.com/docker/go-metrics/go.mod @@ -0,0 +1,5 @@ +module github.com/docker/go-metrics + +go 1.11 + +require github.com/prometheus/client_golang v1.1.0 diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go index bb3be41d36..05601e9ecd 100644 --- a/vendor/github.com/docker/go-metrics/handler.go +++ b/vendor/github.com/docker/go-metrics/handler.go @@ -4,10 +4,71 @@ import ( "net/http" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// HTTPHandlerOpts describes a set of configurable options of http metrics +type HTTPHandlerOpts struct { + DurationBuckets []float64 + RequestSizeBuckets []float64 + ResponseSizeBuckets []float64 +} + +const ( + InstrumentHandlerResponseSize = iota + InstrumentHandlerRequestSize + InstrumentHandlerDuration + InstrumentHandlerCounter + InstrumentHandlerInFlight +) + +type HTTPMetric struct { + prometheus.Collector + handlerType int +} + +var ( + defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} + defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G + defaultResponseSizeBuckets = defaultRequestSizeBuckets ) // Handler returns the global http.Handler that provides the prometheus -// metrics format on GET requests +// metrics format on GET requests. This handler is no longer instrumented. func Handler() http.Handler { - return prometheus.Handler() + return promhttp.Handler() +} + +func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(metrics, handler.ServeHTTP) +} + +func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc { + var handler http.Handler + handler = http.HandlerFunc(handlerFunc) + for _, metric := range metrics { + switch metric.handlerType { + case InstrumentHandlerResponseSize: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerResponseSize(collector, handler) + } + case InstrumentHandlerRequestSize: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerRequestSize(collector, handler) + } + case InstrumentHandlerDuration: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerDuration(collector, handler) + } + case InstrumentHandlerCounter: + if collector, ok := metric.Collector.(*prometheus.CounterVec); ok { + handler = promhttp.InstrumentHandlerCounter(collector, handler) + } + case InstrumentHandlerInFlight: + if collector, ok := metric.Collector.(prometheus.Gauge); ok { + handler = promhttp.InstrumentHandlerInFlight(collector, handler) + } + } + } + return handler.ServeHTTP } diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go index 7734c29459..798315451a 100644 --- a/vendor/github.com/docker/go-metrics/namespace.go +++ b/vendor/github.com/docker/go-metrics/namespace.go @@ -179,3 +179,137 @@ func makeName(name string, unit Unit) string { return fmt.Sprintf("%s_%s", name, unit) } + +func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: 
defaultDurationBuckets, + RequestSizeBuckets: defaultResponseSizeBuckets, + ResponseSizeBuckets: defaultResponseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: durationBuckets, + RequestSizeBuckets: requestSizeBuckets, + ResponseSizeBuckets: responseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { + var httpMetrics []*HTTPMetric + inFlightMetric := n.NewInFlightGaugeMetric(handlerName) + requestTotalMetric := n.NewRequestTotalMetric(handlerName) + requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) + requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) + responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) + httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) + return httpMetrics +} + +func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "in_flight_requests", + Help: "The in-flight HTTP requests", + ConstLabels: prometheus.Labels(labels), + }) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerInFlight, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: prometheus.Labels(labels), + }, + []string{"code", "method"}, + ) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerCounter, + } + n.Add(httpMetric) + return httpMetric +} +func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("DurationBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "request_duration_seconds", + Help: "The HTTP request latencies in seconds.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metric := prometheus.NewHistogramVec(opts, []string{"method"}) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerDuration, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("RequestSizeBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "request_size_bytes", + Help: "The HTTP request sizes in bytes.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metric := prometheus.NewHistogramVec(opts, []string{}) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerRequestSize, + } + n.Add(httpMetric) + return 
httpMetric +} + +func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("ResponseSizeBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "response_size_bytes", + Help: "The HTTP response sizes in bytes.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metrics := prometheus.NewHistogramVec(opts, []string{}) + httpMetric := &HTTPMetric{ + Collector: metrics, + handlerType: InstrumentHandlerResponseSize, + } + n.Add(httpMetric) + return httpMetric +} diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go index e91eca76bd..824c98739c 100644 --- a/vendor/github.com/docker/go-metrics/timer.go +++ b/vendor/github.com/docker/go-metrics/timer.go @@ -28,15 +28,27 @@ type Timer interface { // LabeledTimer is a timer that must have label values populated before use. type LabeledTimer interface { - WithValues(labels ...string) Timer + WithValues(labels ...string) *labeledTimerObserver } type labeledTimer struct { m *prometheus.HistogramVec } -func (lt *labeledTimer) WithValues(labels ...string) Timer { - return &timer{m: lt.m.WithLabelValues(labels...)} +type labeledTimerObserver struct { + m prometheus.Observer +} + +func (lbo *labeledTimerObserver) Update(duration time.Duration) { + lbo.m.Observe(duration.Seconds()) +} + +func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { + lbo.m.Observe(time.Since(since).Seconds()) +} + +func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { + return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} } func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { @@ -48,7 +60,7 @@ func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { } type timer struct { - m prometheus.Histogram + m prometheus.Observer } func (t *timer) Update(duration time.Duration) { @@ -60,9 +72,14 @@ func (t *timer) UpdateSince(since time.Time) { } func (t *timer) Describe(c chan<- *prometheus.Desc) { - t.m.Describe(c) + c <- t.m.(prometheus.Metric).Desc() } func (t *timer) Collect(c chan<- prometheus.Metric) { - t.m.Collect(c) + // Are there any observers that don't implement Collector? It is really + // unclear what the point of the upstream change was, but we'll let this + // panic if we get an observer that doesn't implement collector. In this + // case, we should almost always see metricVec objects, so this should + // never panic. 
+ t.m.(prometheus.Collector).Collect(c) } diff --git a/vendor/github.com/docker/swarmkit/api/specs.pb.go b/vendor/github.com/docker/swarmkit/api/specs.pb.go index d3e1a598ee..82e14798be 100644 --- a/vendor/github.com/docker/swarmkit/api/specs.pb.go +++ b/vendor/github.com/docker/swarmkit/api/specs.pb.go @@ -1021,8 +1021,11 @@ type ContainerSpec struct { Sysctls map[string]string `protobuf:"bytes,26,rep,name=sysctls,proto3" json:"sysctls,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // CapabilityAdd sets the list of capabilities to add to the default capability list CapabilityAdd []string `protobuf:"bytes,27,rep,name=capability_add,json=capabilityAdd,proto3" json:"capability_add,omitempty"` - // CapabilityAdd sets the list of capabilities to drop from the default capability list + // CapabilityDrop sets the list of capabilities to drop from the default capability list CapabilityDrop []string `protobuf:"bytes,28,rep,name=capability_drop,json=capabilityDrop,proto3" json:"capability_drop,omitempty"` + // Ulimits defines the list of ulimits to set in the container. This option + // is equivalent to passing --ulimit to docker run. + Ulimits []*ContainerSpec_Ulimit `protobuf:"bytes,29,rep,name=ulimits,proto3" json:"ulimits,omitempty"` } func (m *ContainerSpec) Reset() { *m = ContainerSpec{} } @@ -1143,6 +1146,44 @@ func (m *ContainerSpec_DNSConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerSpec_DNSConfig proto.InternalMessageInfo +type ContainerSpec_Ulimit struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Soft int64 `protobuf:"varint,2,opt,name=soft,proto3" json:"soft,omitempty"` + Hard int64 `protobuf:"varint,3,opt,name=hard,proto3" json:"hard,omitempty"` +} + +func (m *ContainerSpec_Ulimit) Reset() { *m = ContainerSpec_Ulimit{} } +func (*ContainerSpec_Ulimit) ProtoMessage() {} +func (*ContainerSpec_Ulimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6589acc608f7d4fd, []int{10, 4} +} +func (m *ContainerSpec_Ulimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerSpec_Ulimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerSpec_Ulimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerSpec_Ulimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerSpec_Ulimit.Merge(m, src) +} +func (m *ContainerSpec_Ulimit) XXX_Size() int { + return m.Size() +} +func (m *ContainerSpec_Ulimit) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerSpec_Ulimit.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerSpec_Ulimit proto.InternalMessageInfo + // EndpointSpec defines the properties that can be configured to // access and loadbalance the service. 
type EndpointSpec struct { @@ -1491,6 +1532,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "docker.swarmkit.v1.ContainerSpec.SysctlsEntry") proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions") proto.RegisterType((*ContainerSpec_DNSConfig)(nil), "docker.swarmkit.v1.ContainerSpec.DNSConfig") + proto.RegisterType((*ContainerSpec_Ulimit)(nil), "docker.swarmkit.v1.ContainerSpec.Ulimit") proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec") proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec") proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec") @@ -1503,152 +1545,155 @@ func init() { } var fileDescriptor_6589acc608f7d4fd = []byte{ - // 2311 bytes of a gzipped FileDescriptorProto + // 2363 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x73, 0x1b, 0xc7, - 0xd1, 0x06, 0x48, 0x10, 0x1f, 0xbd, 0x00, 0x05, 0x8e, 0x65, 0x7b, 0x09, 0xc9, 0x20, 0x0c, 0xcb, + 0xd1, 0x06, 0x48, 0x10, 0x1f, 0xbd, 0x00, 0x05, 0x8e, 0x65, 0x7b, 0x09, 0x49, 0x20, 0x0c, 0xcb, 0x36, 0x6d, 0xd7, 0x0b, 0xd6, 0xcb, 0xb8, 0x1c, 0x7f, 0xc4, 0x49, 0x00, 0x02, 0x96, 0x60, 0x49, - 0x14, 0x6a, 0x40, 0x2b, 0x51, 0x55, 0xaa, 0x50, 0x83, 0xdd, 0x21, 0xb0, 0xe1, 0x62, 0x67, 0x33, - 0x3b, 0xa0, 0x8d, 0x5b, 0x8e, 0x2e, 0xe6, 0x92, 0x3f, 0xc0, 0x53, 0x2a, 0xa7, 0x5c, 0x92, 0x4b, - 0x7e, 0x83, 0x8f, 0x3e, 0x3a, 0x17, 0x56, 0x4c, 0xff, 0x84, 0xdc, 0x72, 0x49, 0x6a, 0x66, 0x67, - 0x17, 0x0b, 0x0a, 0x10, 0x95, 0x8a, 0x0e, 0xb9, 0xcd, 0xf4, 0x3e, 0x4f, 0x4f, 0xf7, 0x4c, 0x77, - 0x4f, 0xcf, 0xc2, 0xbb, 0x23, 0x47, 0x8c, 0xa7, 0xc3, 0x86, 0xc5, 0x26, 0x7b, 0x36, 0xb3, 0x4e, + 0x14, 0x6a, 0x40, 0x29, 0x51, 0x55, 0xaa, 0x50, 0x83, 0xdd, 0x21, 0xb0, 0xe1, 0x62, 0x67, 0x33, + 0x3b, 0xa0, 0x8d, 0x5b, 0x8e, 0x2e, 0xe5, 0x92, 0x3f, 0xc0, 0x53, 0x2a, 0xa7, 0x5c, 0x92, 0x7f, + 0x90, 0xa3, 0x8f, 0x3e, 0x3a, 0x17, 0x56, 0x4c, 0xff, 0x84, 0xdc, 0x72, 0x49, 0x6a, 0x66, 0x67, + 0x17, 0x0b, 0x0a, 0x10, 0x95, 0x8a, 0x0e, 0xb9, 0xcd, 0xf4, 0x3e, 0x4f, 0x4f, 0xcf, 0x4c, 0x77, + 0x4f, 0xf7, 0xc2, 0x7b, 0x23, 0x47, 0x8c, 0xa7, 0xc3, 0x86, 0xc5, 0x26, 0x7b, 0x36, 0xb3, 0x4e, 0x28, 0xdf, 0x0b, 0xbe, 0x24, 0x7c, 0x72, 0xe2, 0x88, 0x3d, 0xe2, 0x3b, 0x7b, 0x81, 0x4f, 0xad, - 0xa0, 0xe1, 0x73, 0x26, 0x18, 0x42, 0x21, 0xa0, 0x11, 0x01, 0x1a, 0xa7, 0xff, 0x5f, 0xb9, 0x8e, - 0x2f, 0x66, 0x3e, 0xd5, 0xfc, 0xca, 0xcd, 0x11, 0x1b, 0x31, 0x35, 0xdc, 0x93, 0x23, 0x2d, 0xad, + 0xa0, 0xe1, 0x73, 0x26, 0x18, 0x42, 0x21, 0xa0, 0x11, 0x01, 0x1a, 0xa7, 0xff, 0x5f, 0xb9, 0x8a, + 0x2f, 0x66, 0x3e, 0xd5, 0xfc, 0xca, 0xf5, 0x11, 0x1b, 0x31, 0x35, 0xdc, 0x93, 0x23, 0x2d, 0xad, 0x8e, 0x18, 0x1b, 0xb9, 0x74, 0x4f, 0xcd, 0x86, 0xd3, 0xe3, 0x3d, 0x7b, 0xca, 0x89, 0x70, 0x98, - 0xa7, 0xbf, 0x6f, 0x5f, 0xfd, 0x4e, 0xbc, 0xd9, 0x2a, 0xea, 0x97, 0x9c, 0xf8, 0x3e, 0xe5, 0x7a, - 0xc1, 0xfa, 0x79, 0x06, 0xf2, 0x87, 0xcc, 0xa6, 0x7d, 0x9f, 0x5a, 0xe8, 0x2e, 0x18, 0xc4, 0xf3, - 0x98, 0x50, 0xba, 0x03, 0x33, 0x5d, 0x4b, 0xef, 0x1a, 0xfb, 0x3b, 0x8d, 0xa7, 0x7d, 0x6a, 0x34, - 0xe7, 0xb0, 0x56, 0xe6, 0x9b, 0x8b, 0x9d, 0x14, 0x4e, 0x32, 0xd1, 0xcf, 0xa0, 0x68, 0xd3, 0xc0, - 0xe1, 0xd4, 0x1e, 0x70, 0xe6, 0x52, 0x73, 0xad, 0x96, 0xde, 0xdd, 0xdc, 0xbf, 0xbd, 0x4c, 0x93, - 0x5c, 0x1c, 0x33, 0x97, 0x62, 0x43, 0x33, 0xe4, 0x04, 0xdd, 0x05, 0x98, 0xd0, 0xc9, 0x90, 0xf2, - 0x60, 0xec, 0xf8, 0xe6, 0xba, 0xa2, 0xbf, 0xbd, 0x8a, 0x2e, 0x6d, 0x6f, 0x3c, 0x8c, 0xe1, 0x38, - 0x41, 0x45, 0x0f, 0xa1, 0x48, 0x4e, 0x89, 0xe3, 
0x92, 0xa1, 0xe3, 0x3a, 0x62, 0x66, 0x66, 0x94, - 0xaa, 0x77, 0x9e, 0xa9, 0xaa, 0x99, 0x20, 0xe0, 0x05, 0x7a, 0xdd, 0x06, 0x98, 0x2f, 0x84, 0xde, - 0x82, 0x5c, 0xaf, 0x73, 0xd8, 0xee, 0x1e, 0xde, 0x2d, 0xa7, 0x2a, 0xdb, 0x67, 0xe7, 0xb5, 0x97, - 0xa5, 0x8e, 0x39, 0xa0, 0x47, 0x3d, 0xdb, 0xf1, 0x46, 0x68, 0x17, 0xf2, 0xcd, 0x83, 0x83, 0x4e, - 0xef, 0xa8, 0xd3, 0x2e, 0xa7, 0x2b, 0x95, 0xb3, 0xf3, 0xda, 0x2b, 0x8b, 0xc0, 0xa6, 0x65, 0x51, - 0x5f, 0x50, 0xbb, 0x92, 0xf9, 0xfa, 0x0f, 0xd5, 0x54, 0xfd, 0xeb, 0x34, 0x14, 0x93, 0x46, 0xa0, - 0xb7, 0x20, 0xdb, 0x3c, 0x38, 0xea, 0x3e, 0xee, 0x94, 0x53, 0x73, 0x7a, 0x12, 0xd1, 0xb4, 0x84, - 0x73, 0x4a, 0xd1, 0x1d, 0xd8, 0xe8, 0x35, 0xbf, 0xe8, 0x77, 0xca, 0xe9, 0xb9, 0x39, 0x49, 0x58, - 0x8f, 0x4c, 0x03, 0x85, 0x6a, 0xe3, 0x66, 0xf7, 0xb0, 0xbc, 0xb6, 0x1c, 0xd5, 0xe6, 0xc4, 0xf1, - 0xb4, 0x29, 0x7f, 0xda, 0x00, 0xa3, 0x4f, 0xf9, 0xa9, 0x63, 0xbd, 0xe0, 0x10, 0xf9, 0x00, 0x32, - 0x82, 0x04, 0x27, 0x2a, 0x34, 0x8c, 0xe5, 0xa1, 0x71, 0x44, 0x82, 0x13, 0xb9, 0xa8, 0xa6, 0x2b, - 0xbc, 0x8c, 0x0c, 0x4e, 0x7d, 0xd7, 0xb1, 0x88, 0xa0, 0xb6, 0x8a, 0x0c, 0x63, 0xff, 0xcd, 0x65, - 0x6c, 0x1c, 0xa3, 0xb4, 0xfd, 0xf7, 0x52, 0x38, 0x41, 0x45, 0x9f, 0x40, 0x76, 0xe4, 0xb2, 0x21, - 0x71, 0x55, 0x4c, 0x18, 0xfb, 0xaf, 0x2f, 0x53, 0x72, 0x57, 0x21, 0xe6, 0x0a, 0x34, 0x05, 0x7d, - 0x0e, 0x9b, 0x73, 0x55, 0x83, 0x5f, 0xb3, 0xa1, 0x09, 0xab, 0x95, 0xcc, 0x2d, 0xf9, 0x9c, 0x0d, - 0xef, 0xa5, 0x70, 0x89, 0x27, 0x05, 0xe8, 0xa7, 0x00, 0xa1, 0x56, 0xa5, 0xc7, 0x50, 0x7a, 0x5e, - 0x5b, 0x6d, 0x4c, 0xa8, 0xa3, 0x30, 0x8a, 0x26, 0xe8, 0x43, 0xc8, 0x4e, 0x7d, 0x9b, 0x08, 0x6a, - 0x66, 0x15, 0xb7, 0xb6, 0x8c, 0xfb, 0x85, 0x42, 0x1c, 0x30, 0xef, 0xd8, 0x19, 0x61, 0x8d, 0x47, - 0x3f, 0x81, 0x3c, 0x67, 0xae, 0x3b, 0x24, 0xd6, 0x89, 0x59, 0x78, 0x4e, 0x6e, 0xcc, 0x40, 0xf7, - 0x21, 0xef, 0x51, 0xf1, 0x25, 0xe3, 0x27, 0x81, 0x99, 0xab, 0xad, 0xef, 0x1a, 0xfb, 0xef, 0x2d, - 0x4d, 0xab, 0x10, 0xd3, 0x14, 0x82, 0x58, 0xe3, 0x09, 0xf5, 0x44, 0xa8, 0xa8, 0xb5, 0x66, 0xa6, + 0xa7, 0xbf, 0x6f, 0x5f, 0xfe, 0x4e, 0xbc, 0xd9, 0x2a, 0xea, 0x97, 0x9c, 0xf8, 0x3e, 0xe5, 0x7a, + 0xc1, 0xfa, 0x59, 0x06, 0xf2, 0x87, 0xcc, 0xa6, 0x7d, 0x9f, 0x5a, 0xe8, 0x0e, 0x18, 0xc4, 0xf3, + 0x98, 0x50, 0xba, 0x03, 0x33, 0x5d, 0x4b, 0xef, 0x1a, 0xfb, 0x3b, 0x8d, 0x67, 0xf7, 0xd4, 0x68, + 0xce, 0x61, 0xad, 0xcc, 0x37, 0xe7, 0x3b, 0x29, 0x9c, 0x64, 0xa2, 0x9f, 0x41, 0xd1, 0xa6, 0x81, + 0xc3, 0xa9, 0x3d, 0xe0, 0xcc, 0xa5, 0xe6, 0x5a, 0x2d, 0xbd, 0xbb, 0xb9, 0x7f, 0x73, 0x99, 0x26, + 0xb9, 0x38, 0x66, 0x2e, 0xc5, 0x86, 0x66, 0xc8, 0x09, 0xba, 0x03, 0x30, 0xa1, 0x93, 0x21, 0xe5, + 0xc1, 0xd8, 0xf1, 0xcd, 0x75, 0x45, 0x7f, 0x67, 0x15, 0x5d, 0xda, 0xde, 0x78, 0x10, 0xc3, 0x71, + 0x82, 0x8a, 0x1e, 0x40, 0x91, 0x9c, 0x12, 0xc7, 0x25, 0x43, 0xc7, 0x75, 0xc4, 0xcc, 0xcc, 0x28, + 0x55, 0xef, 0x3e, 0x57, 0x55, 0x33, 0x41, 0xc0, 0x0b, 0xf4, 0xba, 0x0d, 0x30, 0x5f, 0x08, 0xbd, + 0x0d, 0xb9, 0x5e, 0xe7, 0xb0, 0xdd, 0x3d, 0xbc, 0x53, 0x4e, 0x55, 0xb6, 0x9f, 0x9e, 0xd5, 0x5e, + 0x95, 0x3a, 0xe6, 0x80, 0x1e, 0xf5, 0x6c, 0xc7, 0x1b, 0xa1, 0x5d, 0xc8, 0x37, 0x0f, 0x0e, 0x3a, + 0xbd, 0xa3, 0x4e, 0xbb, 0x9c, 0xae, 0x54, 0x9e, 0x9e, 0xd5, 0x5e, 0x5b, 0x04, 0x36, 0x2d, 0x8b, + 0xfa, 0x82, 0xda, 0x95, 0xcc, 0xd7, 0x7f, 0xa8, 0xa6, 0xea, 0x5f, 0xa7, 0xa1, 0x98, 0x34, 0x02, + 0xbd, 0x0d, 0xd9, 0xe6, 0xc1, 0x51, 0xf7, 0x71, 0xa7, 0x9c, 0x9a, 0xd3, 0x93, 0x88, 0xa6, 0x25, + 0x9c, 0x53, 0x8a, 0x6e, 0xc3, 0x46, 0xaf, 0xf9, 0xa8, 0xdf, 0x29, 0xa7, 0xe7, 0xe6, 0x24, 0x61, + 0x3d, 0x32, 0x0d, 0x14, 0xaa, 0x8d, 0x9b, 0xdd, 0xc3, 0xf2, 0xda, 0x72, 
0x54, 0x9b, 0x13, 0xc7, + 0xd3, 0xa6, 0xfc, 0x69, 0x03, 0x8c, 0x3e, 0xe5, 0xa7, 0x8e, 0xf5, 0x92, 0x5d, 0xe4, 0x43, 0xc8, + 0x08, 0x12, 0x9c, 0x28, 0xd7, 0x30, 0x96, 0xbb, 0xc6, 0x11, 0x09, 0x4e, 0xe4, 0xa2, 0x9a, 0xae, + 0xf0, 0xd2, 0x33, 0x38, 0xf5, 0x5d, 0xc7, 0x22, 0x82, 0xda, 0xca, 0x33, 0x8c, 0xfd, 0xb7, 0x96, + 0xb1, 0x71, 0x8c, 0xd2, 0xf6, 0xdf, 0x4d, 0xe1, 0x04, 0x15, 0x7d, 0x0a, 0xd9, 0x91, 0xcb, 0x86, + 0xc4, 0x55, 0x3e, 0x61, 0xec, 0xbf, 0xb1, 0x4c, 0xc9, 0x1d, 0x85, 0x98, 0x2b, 0xd0, 0x14, 0xf4, + 0x05, 0x6c, 0xce, 0x55, 0x0d, 0x7e, 0xcd, 0x86, 0x26, 0xac, 0x56, 0x32, 0xb7, 0xe4, 0x0b, 0x36, + 0xbc, 0x9b, 0xc2, 0x25, 0x9e, 0x14, 0xa0, 0x9f, 0x02, 0x84, 0x5a, 0x95, 0x1e, 0x43, 0xe9, 0xb9, + 0xb5, 0xda, 0x98, 0x50, 0x47, 0x61, 0x14, 0x4d, 0xd0, 0x47, 0x90, 0x9d, 0xfa, 0x36, 0x11, 0xd4, + 0xcc, 0x2a, 0x6e, 0x6d, 0x19, 0xf7, 0x91, 0x42, 0x1c, 0x30, 0xef, 0xd8, 0x19, 0x61, 0x8d, 0x47, + 0x3f, 0x81, 0x3c, 0x67, 0xae, 0x3b, 0x24, 0xd6, 0x89, 0x59, 0x78, 0x41, 0x6e, 0xcc, 0x40, 0xf7, + 0x20, 0xef, 0x51, 0xf1, 0x25, 0xe3, 0x27, 0x81, 0x99, 0xab, 0xad, 0xef, 0x1a, 0xfb, 0xef, 0x2f, + 0x0d, 0xab, 0x10, 0xd3, 0x14, 0x82, 0x58, 0xe3, 0x09, 0xf5, 0x44, 0xa8, 0xa8, 0xb5, 0x66, 0xa6, 0x71, 0xac, 0x40, 0x9a, 0x42, 0x3d, 0xdb, 0x67, 0x8e, 0x27, 0xcc, 0xfc, 0x6a, 0x53, 0x3a, 0x1a, - 0x23, 0xc3, 0x02, 0xc7, 0x8c, 0x56, 0x16, 0x32, 0x13, 0x66, 0xd3, 0xfa, 0x1e, 0x6c, 0x3d, 0x75, - 0xec, 0xa8, 0x02, 0x79, 0xbd, 0xe1, 0x61, 0xbc, 0x66, 0x70, 0x3c, 0xaf, 0xdf, 0x80, 0xd2, 0xc2, - 0x11, 0xd7, 0x2d, 0x28, 0x2d, 0x1c, 0x17, 0x7a, 0x13, 0x36, 0x27, 0xe4, 0xab, 0x81, 0xc5, 0x3c, - 0x6b, 0xca, 0x39, 0xf5, 0x84, 0xd6, 0x51, 0x9a, 0x90, 0xaf, 0x0e, 0x62, 0x21, 0x7a, 0x0f, 0xb6, - 0x04, 0x13, 0xc4, 0x1d, 0x58, 0x6c, 0xe2, 0xbb, 0x34, 0xcc, 0x8e, 0x35, 0x85, 0x2c, 0xab, 0x0f, - 0x07, 0x73, 0x79, 0xdd, 0x80, 0x42, 0x7c, 0x96, 0xf5, 0x3f, 0x6f, 0x40, 0x3e, 0x8a, 0x74, 0x74, - 0x1f, 0x80, 0xc4, 0x1b, 0xa5, 0x37, 0xe2, 0x9d, 0xe7, 0xda, 0x55, 0x49, 0x97, 0x11, 0x3e, 0xa7, - 0xa3, 0x26, 0x14, 0x2c, 0xe6, 0x09, 0xe2, 0x78, 0x94, 0xeb, 0x4c, 0x5d, 0x1a, 0x9f, 0x07, 0x11, - 0x48, 0xeb, 0x98, 0xb3, 0x50, 0x0b, 0x72, 0x23, 0xea, 0x51, 0xee, 0x58, 0x3a, 0xc0, 0xdf, 0x5a, - 0x1a, 0x98, 0x21, 0x04, 0x4f, 0x3d, 0xe1, 0x4c, 0xa8, 0xd6, 0x12, 0x11, 0xd1, 0x67, 0x50, 0xe0, - 0x34, 0x60, 0x53, 0x6e, 0xd1, 0x40, 0xa7, 0xfb, 0xee, 0xf2, 0x34, 0x09, 0x41, 0x98, 0xfe, 0x66, - 0xea, 0x70, 0x2a, 0x5d, 0x08, 0xf0, 0x9c, 0x8a, 0x3e, 0x81, 0x1c, 0xa7, 0x81, 0x20, 0x5c, 0x3c, - 0x2b, 0x63, 0x71, 0x08, 0xe9, 0x31, 0xd7, 0xb1, 0x66, 0x38, 0x62, 0xa0, 0x4f, 0xa0, 0xe0, 0xbb, - 0xc4, 0x52, 0x5a, 0xcd, 0x8d, 0xd5, 0x39, 0xd6, 0x8b, 0x40, 0x78, 0x8e, 0x47, 0x1f, 0x01, 0xb8, - 0x6c, 0x34, 0xb0, 0xb9, 0x73, 0x4a, 0xb9, 0xce, 0xb2, 0xca, 0x32, 0x76, 0x5b, 0x21, 0x70, 0xc1, - 0x65, 0xa3, 0x70, 0x88, 0xee, 0xfe, 0x57, 0x49, 0x92, 0x48, 0x90, 0xd7, 0xa1, 0x78, 0xcc, 0xb8, - 0x45, 0x07, 0x3a, 0xd7, 0x0b, 0x2a, 0xb6, 0x0c, 0x25, 0x0b, 0x13, 0x14, 0xfd, 0x0a, 0x5e, 0x8a, - 0x76, 0x6b, 0xc0, 0xe9, 0x31, 0xe5, 0xd4, 0x93, 0x5b, 0x6e, 0xa8, 0x65, 0xdf, 0x7c, 0xf6, 0x96, - 0x6b, 0xb4, 0x2e, 0xb5, 0x88, 0x5f, 0xfd, 0x10, 0xb4, 0x0a, 0x90, 0xe3, 0xe1, 0x01, 0xd7, 0x7f, - 0x97, 0x96, 0x79, 0x76, 0x05, 0x81, 0xf6, 0xc0, 0x88, 0x97, 0x77, 0x6c, 0x15, 0x70, 0x85, 0xd6, - 0xe6, 0xe5, 0xc5, 0x0e, 0x44, 0xd8, 0x6e, 0x5b, 0x56, 0x60, 0x3d, 0xb6, 0x51, 0x07, 0x4a, 0x31, - 0x41, 0x36, 0x41, 0xba, 0x4d, 0xa8, 0x3d, 0xcb, 0xd2, 0xa3, 0x99, 0x4f, 0x71, 0x91, 0x27, 0x66, - 0xf5, 0x5f, 0x02, 0x7a, 0x3a, 0x00, 0x11, 0x82, 0xcc, 0x89, 0xe3, 0x69, 0x33, 0xb0, 0x1a, 0xa3, - 
0x06, 0xe4, 0x7c, 0x32, 0x73, 0x19, 0xb1, 0x75, 0x1c, 0xde, 0x6c, 0x84, 0xed, 0x51, 0x23, 0x6a, - 0x8f, 0x1a, 0x4d, 0x6f, 0x86, 0x23, 0x50, 0xfd, 0x3e, 0xbc, 0xbc, 0x34, 0xcf, 0xd0, 0x3e, 0x14, - 0xe3, 0x1c, 0x99, 0xfb, 0x7a, 0xe3, 0xf2, 0x62, 0xc7, 0x88, 0x93, 0xa9, 0xdb, 0xc6, 0x46, 0x0c, - 0xea, 0xda, 0xf5, 0xbf, 0x96, 0xa0, 0xb4, 0x90, 0x69, 0xe8, 0x26, 0x6c, 0x38, 0x13, 0x32, 0xa2, - 0xda, 0xc6, 0x70, 0x82, 0x3a, 0x90, 0x75, 0xc9, 0x90, 0xba, 0x32, 0x57, 0xe4, 0xc1, 0xfd, 0xdf, - 0xb5, 0x29, 0xdb, 0x78, 0xa0, 0xf0, 0x1d, 0x4f, 0xf0, 0x19, 0xd6, 0x64, 0x64, 0x42, 0xce, 0x62, - 0x93, 0x09, 0xf1, 0xe4, 0x25, 0xb9, 0xbe, 0x5b, 0xc0, 0xd1, 0x54, 0xee, 0x0c, 0xe1, 0xa3, 0xc0, - 0xcc, 0x28, 0xb1, 0x1a, 0xcb, 0x1a, 0x39, 0x66, 0x81, 0xf0, 0xc8, 0x84, 0x9a, 0x9b, 0xca, 0x9a, - 0x78, 0x8e, 0xca, 0xb0, 0x4e, 0xbd, 0x53, 0x73, 0x43, 0xc1, 0xe5, 0x50, 0x4a, 0x6c, 0x27, 0x4c, - 0x84, 0x02, 0x96, 0x43, 0xa9, 0x73, 0x1a, 0x50, 0x6e, 0xe6, 0xc2, 0xdd, 0x96, 0x63, 0xf4, 0x0a, - 0x64, 0x47, 0x9c, 0x4d, 0xfd, 0x30, 0x02, 0x0b, 0x58, 0xcf, 0xe4, 0x7d, 0xe7, 0x73, 0xe7, 0xd4, - 0x71, 0xe9, 0x88, 0x06, 0xe6, 0x2b, 0xea, 0x20, 0xaa, 0x4b, 0x73, 0x31, 0x46, 0xe1, 0x04, 0x03, - 0x35, 0x20, 0xe3, 0x78, 0x8e, 0x30, 0x5f, 0xd5, 0x79, 0x78, 0xf5, 0x08, 0x5b, 0x8c, 0xb9, 0x8f, - 0x89, 0x3b, 0xa5, 0x58, 0xe1, 0xd0, 0x36, 0xac, 0x0b, 0x31, 0x33, 0x4b, 0xb5, 0xf4, 0x6e, 0xbe, - 0x95, 0xbb, 0xbc, 0xd8, 0x59, 0x3f, 0x3a, 0x7a, 0x82, 0xa5, 0x0c, 0xbd, 0x06, 0xc0, 0x7c, 0xea, - 0x0d, 0x02, 0x61, 0x3b, 0x9e, 0x89, 0x24, 0x02, 0x17, 0xa4, 0xa4, 0x2f, 0x05, 0xe8, 0x96, 0xac, - 0x5c, 0xc4, 0x1e, 0x30, 0xcf, 0x9d, 0x99, 0x2f, 0xa9, 0xaf, 0x79, 0x29, 0x78, 0xe4, 0xb9, 0x33, - 0xb4, 0x03, 0x46, 0x20, 0x98, 0x3f, 0x08, 0x9c, 0x91, 0x47, 0x5c, 0xf3, 0xa6, 0xf2, 0x1c, 0xa4, - 0xa8, 0xaf, 0x24, 0xe8, 0xc7, 0x90, 0x9d, 0xb0, 0xa9, 0x27, 0x02, 0x33, 0xaf, 0x0e, 0x72, 0x7b, - 0x99, 0x8f, 0x0f, 0x25, 0x42, 0x67, 0x9d, 0x86, 0xa3, 0x0e, 0x6c, 0x29, 0xcd, 0x23, 0x4e, 0x2c, - 0x3a, 0xf0, 0x29, 0x77, 0x98, 0xad, 0xef, 0xe7, 0xed, 0xa7, 0xbc, 0x6d, 0xeb, 0xa7, 0x00, 0xbe, - 0x21, 0x39, 0x77, 0x25, 0xa5, 0xa7, 0x18, 0xa8, 0x07, 0x45, 0x7f, 0xea, 0xba, 0x03, 0xe6, 0x87, - 0xb7, 0x51, 0x58, 0xc0, 0x9f, 0x23, 0x9c, 0x7a, 0x53, 0xd7, 0x7d, 0x14, 0x92, 0xb0, 0xe1, 0xcf, - 0x27, 0xe8, 0x53, 0xc8, 0x05, 0xd4, 0xe2, 0x54, 0x04, 0x66, 0x51, 0xb9, 0xf4, 0xc6, 0x32, 0x65, - 0x7d, 0x05, 0x89, 0xeb, 0x02, 0x8e, 0x38, 0x92, 0x6e, 0xa9, 0xb2, 0x16, 0x98, 0x2f, 0xaf, 0xa6, - 0xeb, 0xca, 0x37, 0xa7, 0x6b, 0x8e, 0x4c, 0x17, 0x19, 0x93, 0x81, 0xb9, 0xa5, 0xc2, 0x29, 0x9c, - 0xa0, 0x27, 0x00, 0xb6, 0x17, 0x0c, 0x42, 0x90, 0x79, 0x43, 0xf9, 0xf8, 0xde, 0xf5, 0x3e, 0xb6, - 0x0f, 0xfb, 0xba, 0x0f, 0x29, 0x5d, 0x5e, 0xec, 0x14, 0xe2, 0x29, 0x2e, 0xd8, 0x5e, 0x10, 0x0e, - 0x51, 0x0b, 0x8c, 0x31, 0x25, 0xae, 0x18, 0x5b, 0x63, 0x6a, 0x9d, 0x98, 0xe5, 0xd5, 0x6d, 0xc9, - 0x3d, 0x05, 0xd3, 0x1a, 0x92, 0x24, 0xd4, 0x85, 0x82, 0x13, 0x30, 0x57, 0x1d, 0x91, 0x69, 0xaa, - 0xfa, 0xf6, 0x1c, 0xd6, 0x75, 0x23, 0x0a, 0x9e, 0xb3, 0xd1, 0x6d, 0x28, 0xf8, 0x8e, 0x1d, 0x3c, - 0x70, 0x26, 0x8e, 0x30, 0xb7, 0x6b, 0xe9, 0xdd, 0x75, 0x3c, 0x17, 0xa0, 0x7b, 0x90, 0x0b, 0x66, - 0x81, 0x25, 0xdc, 0xc0, 0xac, 0xa8, 0xcd, 0x6d, 0x5c, 0xbf, 0x4c, 0x3f, 0x24, 0x84, 0x85, 0x23, - 0xa2, 0xcb, 0x8e, 0xc7, 0x22, 0xbe, 0x7e, 0x0b, 0x0c, 0x88, 0x6d, 0x9b, 0xb7, 0xd4, 0x86, 0x97, - 0xe6, 0xd2, 0xa6, 0x6d, 0xa3, 0xb7, 0xe1, 0x46, 0x02, 0x66, 0x73, 0xe6, 0x9b, 0xb7, 0x15, 0x2e, - 0xc1, 0x6e, 0x73, 0xe6, 0x57, 0x3e, 0x02, 0x23, 0x51, 0xa0, 0x64, 0xf1, 0x38, 0xa1, 0x33, 0x5d, - 0xf3, 0xe4, 0x50, 0x1e, 
0xec, 0xa9, 0xcc, 0x57, 0x55, 0x94, 0x0b, 0x38, 0x9c, 0x7c, 0xbc, 0xf6, - 0x61, 0xba, 0xb2, 0x0f, 0x46, 0x22, 0x18, 0xd1, 0x1b, 0xf2, 0xc2, 0x18, 0x39, 0x81, 0xe0, 0xb3, - 0x01, 0x99, 0x8a, 0xb1, 0xf9, 0x73, 0x45, 0x28, 0x46, 0xc2, 0xe6, 0x54, 0x8c, 0x2b, 0x03, 0x98, - 0x9f, 0x26, 0xaa, 0x81, 0x21, 0x6b, 0x58, 0x40, 0xf9, 0x29, 0xe5, 0xb2, 0xfd, 0x93, 0x06, 0x26, - 0x45, 0xb2, 0x4a, 0x05, 0x94, 0x70, 0x6b, 0xac, 0xca, 0x6d, 0x01, 0xeb, 0x99, 0xac, 0x9f, 0x51, - 0xe2, 0xe8, 0xfa, 0xa9, 0xa7, 0x95, 0x8f, 0xa1, 0x98, 0xdc, 0xb8, 0xff, 0xc4, 0xa1, 0xfa, 0x5f, - 0xd2, 0x50, 0x88, 0x0f, 0x17, 0xbd, 0x0f, 0x5b, 0xdd, 0xfe, 0xa3, 0x07, 0xcd, 0xa3, 0xee, 0xa3, - 0xc3, 0x41, 0xbb, 0xf3, 0x59, 0xf3, 0x8b, 0x07, 0x47, 0xe5, 0x54, 0xe5, 0xb5, 0xb3, 0xf3, 0xda, - 0xf6, 0xfc, 0x1e, 0x89, 0xe0, 0x6d, 0x7a, 0x4c, 0xa6, 0xae, 0x58, 0x64, 0xf5, 0xf0, 0xa3, 0x83, - 0x4e, 0xbf, 0x5f, 0x4e, 0xaf, 0x62, 0xf5, 0x38, 0xb3, 0x68, 0x10, 0xa0, 0x7d, 0x28, 0xcf, 0x59, - 0xf7, 0x9e, 0xf4, 0x3a, 0xf8, 0x71, 0x79, 0xad, 0x72, 0xfb, 0xec, 0xbc, 0x66, 0x3e, 0x4d, 0xba, - 0x37, 0xf3, 0x29, 0x7f, 0xac, 0x9f, 0x80, 0xff, 0x48, 0x43, 0x31, 0xd9, 0x77, 0xa3, 0x83, 0xb0, - 0xdb, 0x56, 0x1e, 0x6f, 0xee, 0xef, 0x5d, 0xd7, 0xa7, 0xab, 0xbb, 0xdb, 0x9d, 0x4a, 0xbd, 0x0f, - 0xe5, 0x63, 0x5f, 0x91, 0xd1, 0xfb, 0xb0, 0xe1, 0x33, 0x2e, 0xa2, 0x5b, 0x6e, 0xf9, 0x05, 0xc0, - 0x78, 0xd4, 0x08, 0x85, 0xe0, 0xfa, 0x18, 0x36, 0x17, 0xb5, 0xa1, 0x3b, 0xb0, 0xfe, 0xb8, 0xdb, - 0x2b, 0xa7, 0x2a, 0xb7, 0xce, 0xce, 0x6b, 0xaf, 0x2e, 0x7e, 0x7c, 0xec, 0x70, 0x31, 0x25, 0x6e, - 0xb7, 0x87, 0xde, 0x85, 0x8d, 0xf6, 0x61, 0x1f, 0xe3, 0x72, 0xba, 0xb2, 0x73, 0x76, 0x5e, 0xbb, - 0xb5, 0x88, 0x93, 0x9f, 0xd8, 0xd4, 0xb3, 0x31, 0x1b, 0xc6, 0x0f, 0xdf, 0x7f, 0xae, 0x81, 0xa1, - 0x2f, 0xff, 0x17, 0xfd, 0x6f, 0xa4, 0x14, 0x36, 0x92, 0x51, 0xcd, 0x5a, 0xbb, 0xb6, 0x9f, 0x2c, - 0x86, 0x04, 0x1d, 0xd3, 0xaf, 0x43, 0xd1, 0xf1, 0x4f, 0x3f, 0x18, 0x50, 0x8f, 0x0c, 0x5d, 0xfd, - 0x06, 0xce, 0x63, 0x43, 0xca, 0x3a, 0xa1, 0x48, 0x5e, 0xe7, 0x8e, 0x27, 0x28, 0xf7, 0xf4, 0xeb, - 0x36, 0x8f, 0xe3, 0x39, 0xfa, 0x14, 0x32, 0x8e, 0x4f, 0x26, 0xba, 0x09, 0x5e, 0xea, 0x41, 0xb7, - 0xd7, 0x7c, 0xa8, 0x73, 0xae, 0x95, 0xbf, 0xbc, 0xd8, 0xc9, 0x48, 0x01, 0x56, 0x34, 0x54, 0x8d, - 0x5e, 0x28, 0x72, 0x25, 0xd5, 0x02, 0xe4, 0x71, 0x42, 0x22, 0xf3, 0xc6, 0xf1, 0x46, 0x9c, 0x06, - 0x81, 0x6a, 0x06, 0xf2, 0x38, 0x9a, 0xa2, 0x0a, 0xe4, 0x74, 0x37, 0xab, 0x1e, 0x36, 0x05, 0xf9, - 0x46, 0xd0, 0x82, 0x56, 0x09, 0x8c, 0x70, 0x37, 0x06, 0xc7, 0x9c, 0x4d, 0xea, 0xff, 0xca, 0x80, - 0x71, 0xe0, 0x4e, 0x03, 0xa1, 0x3b, 0xa5, 0x17, 0xb6, 0xf9, 0x4f, 0x60, 0x8b, 0xa8, 0x7f, 0x2d, - 0xc4, 0x93, 0x57, 0xab, 0x7a, 0x24, 0xe8, 0x03, 0xb8, 0xb3, 0x54, 0x5d, 0x0c, 0x0e, 0x1f, 0x14, - 0xad, 0xac, 0xd4, 0x69, 0xa6, 0x71, 0x99, 0x5c, 0xf9, 0x82, 0xfa, 0x50, 0x62, 0xdc, 0x1a, 0xd3, - 0x40, 0x84, 0x17, 0xb2, 0xfe, 0x37, 0xb1, 0xf4, 0xaf, 0xd5, 0xa3, 0x24, 0x50, 0xdf, 0x43, 0xa1, - 0xb5, 0x8b, 0x3a, 0xd0, 0x87, 0x90, 0xe1, 0xe4, 0x38, 0x7a, 0xf0, 0x2c, 0x4d, 0x12, 0x4c, 0x8e, - 0xc5, 0x82, 0x0a, 0xc5, 0x40, 0x9f, 0x03, 0xd8, 0x4e, 0xe0, 0x13, 0x61, 0x8d, 0x29, 0xd7, 0x87, - 0xbd, 0xd4, 0xc5, 0x76, 0x8c, 0x5a, 0xd0, 0x92, 0x60, 0xa3, 0xfb, 0x50, 0xb0, 0x48, 0x14, 0xae, - 0xd9, 0xd5, 0x3f, 0x6c, 0x0e, 0x9a, 0x5a, 0x45, 0x59, 0xaa, 0xb8, 0xbc, 0xd8, 0xc9, 0x47, 0x12, - 0x9c, 0xb7, 0x88, 0x0e, 0xdf, 0xfb, 0x50, 0x12, 0x24, 0x38, 0x19, 0xd8, 0x61, 0x39, 0x0b, 0xc3, - 0x64, 0xc5, 0xbd, 0x2a, 0xdf, 0xc5, 0xba, 0xec, 0x45, 0xc7, 0x59, 0x14, 0x09, 0x19, 0xfa, 0x05, - 0x6c, 0x51, 0xcf, 0xe2, 0x33, 0x15, 0xac, 0x91, 
0x85, 0xf9, 0xd5, 0xce, 0x76, 0x62, 0xf0, 0x82, - 0xb3, 0x65, 0x7a, 0x45, 0x5e, 0xff, 0x5b, 0x1a, 0x20, 0x6c, 0x64, 0x5e, 0x6c, 0x00, 0x22, 0xc8, - 0xd8, 0x44, 0x10, 0x15, 0x73, 0x45, 0xac, 0xc6, 0xe8, 0x63, 0x00, 0x41, 0x27, 0xbe, 0x2c, 0xbd, - 0xde, 0x48, 0x87, 0xcd, 0xb3, 0xca, 0x41, 0x02, 0x8d, 0xf6, 0x21, 0xab, 0x9f, 0xa5, 0x99, 0x6b, - 0x79, 0x1a, 0x59, 0xff, 0x63, 0x1a, 0x20, 0x74, 0xf3, 0x7f, 0xda, 0xb7, 0xd6, 0x9d, 0x6f, 0xbe, - 0xaf, 0xa6, 0xbe, 0xfb, 0xbe, 0x9a, 0xfa, 0xed, 0x65, 0x35, 0xfd, 0xcd, 0x65, 0x35, 0xfd, 0xed, - 0x65, 0x35, 0xfd, 0xf7, 0xcb, 0x6a, 0xfa, 0xf7, 0x3f, 0x54, 0x53, 0xdf, 0xfe, 0x50, 0x4d, 0x7d, - 0xf7, 0x43, 0x35, 0x35, 0xcc, 0xaa, 0x56, 0xf8, 0x47, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x92, - 0xc4, 0x34, 0xad, 0xa7, 0x17, 0x00, 0x00, + 0x23, 0xdd, 0x02, 0xc7, 0x8c, 0x56, 0x16, 0x32, 0x13, 0x66, 0xd3, 0xfa, 0x1e, 0x6c, 0x3d, 0x73, + 0xed, 0xa8, 0x02, 0x79, 0x7d, 0xe0, 0xa1, 0xbf, 0x66, 0x70, 0x3c, 0xaf, 0x5f, 0x83, 0xd2, 0xc2, + 0x15, 0xd7, 0x2d, 0x28, 0x2d, 0x5c, 0x17, 0x7a, 0x0b, 0x36, 0x27, 0xe4, 0xab, 0x81, 0xc5, 0x3c, + 0x6b, 0xca, 0x39, 0xf5, 0x84, 0xd6, 0x51, 0x9a, 0x90, 0xaf, 0x0e, 0x62, 0x21, 0x7a, 0x1f, 0xb6, + 0x04, 0x13, 0xc4, 0x1d, 0x58, 0x6c, 0xe2, 0xbb, 0x34, 0x8c, 0x8e, 0x35, 0x85, 0x2c, 0xab, 0x0f, + 0x07, 0x73, 0x79, 0xdd, 0x80, 0x42, 0x7c, 0x97, 0xf5, 0x3f, 0x6f, 0x40, 0x3e, 0xf2, 0x74, 0x74, + 0x0f, 0x80, 0xc4, 0x07, 0xa5, 0x0f, 0xe2, 0xdd, 0x17, 0x3a, 0x55, 0x49, 0x97, 0x1e, 0x3e, 0xa7, + 0xa3, 0x26, 0x14, 0x2c, 0xe6, 0x09, 0xe2, 0x78, 0x94, 0xeb, 0x48, 0x5d, 0xea, 0x9f, 0x07, 0x11, + 0x48, 0xeb, 0x98, 0xb3, 0x50, 0x0b, 0x72, 0x23, 0xea, 0x51, 0xee, 0x58, 0xda, 0xc1, 0xdf, 0x5e, + 0xea, 0x98, 0x21, 0x04, 0x4f, 0x3d, 0xe1, 0x4c, 0xa8, 0xd6, 0x12, 0x11, 0xd1, 0xe7, 0x50, 0xe0, + 0x34, 0x60, 0x53, 0x6e, 0xd1, 0x40, 0x87, 0xfb, 0xee, 0xf2, 0x30, 0x09, 0x41, 0x98, 0xfe, 0x66, + 0xea, 0x70, 0x2a, 0xb7, 0x10, 0xe0, 0x39, 0x15, 0x7d, 0x0a, 0x39, 0x4e, 0x03, 0x41, 0xb8, 0x78, + 0x5e, 0xc4, 0xe2, 0x10, 0xd2, 0x63, 0xae, 0x63, 0xcd, 0x70, 0xc4, 0x40, 0x9f, 0x42, 0xc1, 0x77, + 0x89, 0xa5, 0xb4, 0x9a, 0x1b, 0xab, 0x63, 0xac, 0x17, 0x81, 0xf0, 0x1c, 0x8f, 0x3e, 0x06, 0x70, + 0xd9, 0x68, 0x60, 0x73, 0xe7, 0x94, 0x72, 0x1d, 0x65, 0x95, 0x65, 0xec, 0xb6, 0x42, 0xe0, 0x82, + 0xcb, 0x46, 0xe1, 0x10, 0xdd, 0xf9, 0xaf, 0x82, 0x24, 0x11, 0x20, 0x6f, 0x40, 0xf1, 0x98, 0x71, + 0x8b, 0x0e, 0x74, 0xac, 0x17, 0x94, 0x6f, 0x19, 0x4a, 0x16, 0x06, 0x28, 0xfa, 0x15, 0xbc, 0x12, + 0x9d, 0xd6, 0x80, 0xd3, 0x63, 0xca, 0xa9, 0x27, 0x8f, 0xdc, 0x50, 0xcb, 0xbe, 0xf5, 0xfc, 0x23, + 0xd7, 0x68, 0x9d, 0x6a, 0x11, 0xbf, 0xfc, 0x21, 0x68, 0x15, 0x20, 0xc7, 0xc3, 0x0b, 0xae, 0xff, + 0x2e, 0x2d, 0xe3, 0xec, 0x12, 0x02, 0xed, 0x81, 0x11, 0x2f, 0xef, 0xd8, 0xca, 0xe1, 0x0a, 0xad, + 0xcd, 0x8b, 0xf3, 0x1d, 0x88, 0xb0, 0xdd, 0xb6, 0xcc, 0xc0, 0x7a, 0x6c, 0xa3, 0x0e, 0x94, 0x62, + 0x82, 0x2c, 0x82, 0x74, 0x99, 0x50, 0x7b, 0x9e, 0xa5, 0x47, 0x33, 0x9f, 0xe2, 0x22, 0x4f, 0xcc, + 0xea, 0xbf, 0x04, 0xf4, 0xac, 0x03, 0x22, 0x04, 0x99, 0x13, 0xc7, 0xd3, 0x66, 0x60, 0x35, 0x46, + 0x0d, 0xc8, 0xf9, 0x64, 0xe6, 0x32, 0x62, 0x6b, 0x3f, 0xbc, 0xde, 0x08, 0xcb, 0xa3, 0x46, 0x54, + 0x1e, 0x35, 0x9a, 0xde, 0x0c, 0x47, 0xa0, 0xfa, 0x3d, 0x78, 0x75, 0x69, 0x9c, 0xa1, 0x7d, 0x28, + 0xc6, 0x31, 0x32, 0xdf, 0xeb, 0xb5, 0x8b, 0xf3, 0x1d, 0x23, 0x0e, 0xa6, 0x6e, 0x1b, 0x1b, 0x31, + 0xa8, 0x6b, 0xd7, 0xff, 0xba, 0x09, 0xa5, 0x85, 0x48, 0x43, 0xd7, 0x61, 0xc3, 0x99, 0x90, 0x11, + 0xd5, 0x36, 0x86, 0x13, 0xd4, 0x81, 0xac, 0x4b, 0x86, 0xd4, 0x95, 0xb1, 0x22, 0x2f, 0xee, 0xff, + 0xae, 0x0c, 0xd9, 0xc6, 0x7d, 
0x85, 0xef, 0x78, 0x82, 0xcf, 0xb0, 0x26, 0x23, 0x13, 0x72, 0x16, + 0x9b, 0x4c, 0x88, 0x27, 0x1f, 0xc9, 0xf5, 0xdd, 0x02, 0x8e, 0xa6, 0xf2, 0x64, 0x08, 0x1f, 0x05, + 0x66, 0x46, 0x89, 0xd5, 0x58, 0xe6, 0xc8, 0x31, 0x0b, 0x84, 0x47, 0x26, 0xd4, 0xdc, 0x54, 0xd6, + 0xc4, 0x73, 0x54, 0x86, 0x75, 0xea, 0x9d, 0x9a, 0x1b, 0x0a, 0x2e, 0x87, 0x52, 0x62, 0x3b, 0x61, + 0x20, 0x14, 0xb0, 0x1c, 0x4a, 0x9d, 0xd3, 0x80, 0x72, 0x33, 0x17, 0x9e, 0xb6, 0x1c, 0xa3, 0xd7, + 0x20, 0x3b, 0xe2, 0x6c, 0xea, 0x87, 0x1e, 0x58, 0xc0, 0x7a, 0x26, 0xdf, 0x3b, 0x9f, 0x3b, 0xa7, + 0x8e, 0x4b, 0x47, 0x34, 0x30, 0x5f, 0x53, 0x17, 0x51, 0x5d, 0x1a, 0x8b, 0x31, 0x0a, 0x27, 0x18, + 0xa8, 0x01, 0x19, 0xc7, 0x73, 0x84, 0xf9, 0xba, 0x8e, 0xc3, 0xcb, 0x57, 0xd8, 0x62, 0xcc, 0x7d, + 0x4c, 0xdc, 0x29, 0xc5, 0x0a, 0x87, 0xb6, 0x61, 0x5d, 0x88, 0x99, 0x59, 0xaa, 0xa5, 0x77, 0xf3, + 0xad, 0xdc, 0xc5, 0xf9, 0xce, 0xfa, 0xd1, 0xd1, 0x13, 0x2c, 0x65, 0xe8, 0x16, 0x00, 0xf3, 0xa9, + 0x37, 0x08, 0x84, 0xed, 0x78, 0x26, 0x92, 0x08, 0x5c, 0x90, 0x92, 0xbe, 0x14, 0xa0, 0x1b, 0x32, + 0x73, 0x11, 0x7b, 0xc0, 0x3c, 0x77, 0x66, 0xbe, 0xa2, 0xbe, 0xe6, 0xa5, 0xe0, 0xa1, 0xe7, 0xce, + 0xd0, 0x0e, 0x18, 0x81, 0x60, 0xfe, 0x20, 0x70, 0x46, 0x1e, 0x71, 0xcd, 0xeb, 0x6a, 0xe7, 0x20, + 0x45, 0x7d, 0x25, 0x41, 0x3f, 0x86, 0xec, 0x84, 0x4d, 0x3d, 0x11, 0x98, 0x79, 0x75, 0x91, 0xdb, + 0xcb, 0xf6, 0xf8, 0x40, 0x22, 0x74, 0xd4, 0x69, 0x38, 0xea, 0xc0, 0x96, 0xd2, 0x3c, 0xe2, 0xc4, + 0xa2, 0x03, 0x9f, 0x72, 0x87, 0xd9, 0xfa, 0x7d, 0xde, 0x7e, 0x66, 0xb7, 0x6d, 0xdd, 0x0a, 0xe0, + 0x6b, 0x92, 0x73, 0x47, 0x52, 0x7a, 0x8a, 0x81, 0x7a, 0x50, 0xf4, 0xa7, 0xae, 0x3b, 0x60, 0x7e, + 0xf8, 0x1a, 0x85, 0x09, 0xfc, 0x05, 0xdc, 0xa9, 0x37, 0x75, 0xdd, 0x87, 0x21, 0x09, 0x1b, 0xfe, + 0x7c, 0x82, 0x3e, 0x83, 0x5c, 0x40, 0x2d, 0x4e, 0x45, 0x60, 0x16, 0xd5, 0x96, 0xde, 0x5c, 0xa6, + 0xac, 0xaf, 0x20, 0x71, 0x5e, 0xc0, 0x11, 0x47, 0xd2, 0x2d, 0x95, 0xd6, 0x02, 0xf3, 0xd5, 0xd5, + 0x74, 0x9d, 0xf9, 0xe6, 0x74, 0xcd, 0x91, 0xe1, 0x22, 0x7d, 0x32, 0x30, 0xb7, 0x94, 0x3b, 0x85, + 0x13, 0xf4, 0x04, 0xc0, 0xf6, 0x82, 0x41, 0x08, 0x32, 0xaf, 0xa9, 0x3d, 0xbe, 0x7f, 0xf5, 0x1e, + 0xdb, 0x87, 0x7d, 0x5d, 0x87, 0x94, 0x2e, 0xce, 0x77, 0x0a, 0xf1, 0x14, 0x17, 0x6c, 0x2f, 0x08, + 0x87, 0xa8, 0x05, 0xc6, 0x98, 0x12, 0x57, 0x8c, 0xad, 0x31, 0xb5, 0x4e, 0xcc, 0xf2, 0xea, 0xb2, + 0xe4, 0xae, 0x82, 0x69, 0x0d, 0x49, 0x12, 0xea, 0x42, 0xc1, 0x09, 0x98, 0xab, 0xae, 0xc8, 0x34, + 0x55, 0x7e, 0x7b, 0x01, 0xeb, 0xba, 0x11, 0x05, 0xcf, 0xd9, 0xe8, 0x26, 0x14, 0x7c, 0xc7, 0x0e, + 0xee, 0x3b, 0x13, 0x47, 0x98, 0xdb, 0xb5, 0xf4, 0xee, 0x3a, 0x9e, 0x0b, 0xd0, 0x5d, 0xc8, 0x05, + 0xb3, 0xc0, 0x12, 0x6e, 0x60, 0x56, 0xd4, 0xe1, 0x36, 0xae, 0x5e, 0xa6, 0x1f, 0x12, 0xc2, 0xc4, + 0x11, 0xd1, 0x65, 0xc5, 0x63, 0x11, 0x5f, 0xf7, 0x02, 0x03, 0x62, 0xdb, 0xe6, 0x0d, 0x75, 0xe0, + 0xa5, 0xb9, 0xb4, 0x69, 0xdb, 0xe8, 0x1d, 0xb8, 0x96, 0x80, 0xd9, 0x9c, 0xf9, 0xe6, 0x4d, 0x85, + 0x4b, 0xb0, 0xdb, 0x9c, 0xf9, 0xb2, 0x86, 0x98, 0xba, 0xd2, 0xc6, 0xc0, 0xbc, 0xa5, 0x2c, 0xdb, + 0xbd, 0xda, 0xb2, 0x47, 0x8a, 0x80, 0x23, 0x62, 0xe5, 0x63, 0x30, 0x12, 0x49, 0x4e, 0x26, 0xa0, + 0x13, 0x3a, 0xd3, 0x79, 0x53, 0x0e, 0xa5, 0x73, 0x9c, 0xca, 0x98, 0x57, 0x89, 0xbd, 0x80, 0xc3, + 0xc9, 0x27, 0x6b, 0x1f, 0xa5, 0x2b, 0xfb, 0x60, 0x24, 0x1c, 0x1a, 0xbd, 0x29, 0x1f, 0x9d, 0x91, + 0x13, 0x08, 0x3e, 0x1b, 0x90, 0xa9, 0x18, 0x9b, 0x3f, 0x57, 0x84, 0x62, 0x24, 0x6c, 0x4e, 0xc5, + 0xb8, 0x32, 0x80, 0xb9, 0x47, 0xa0, 0x1a, 0x18, 0x32, 0x0f, 0x06, 0x94, 0x9f, 0x52, 0x2e, 0x4b, + 0x48, 0xb9, 0xc9, 0xa4, 0x48, 0x66, 0xba, 0x80, 0x12, 
0x6e, 0x8d, 0x55, 0xca, 0x2e, 0x60, 0x3d, + 0x93, 0x39, 0x38, 0x0a, 0x3e, 0x9d, 0x83, 0xf5, 0xb4, 0xf2, 0x09, 0x14, 0x93, 0x87, 0xff, 0x1f, + 0x6d, 0xa8, 0x0d, 0xd9, 0xf0, 0x78, 0x64, 0xd6, 0x55, 0x19, 0x5b, 0xbf, 0x71, 0x2a, 0x5b, 0x23, + 0xc8, 0x04, 0xec, 0x58, 0x28, 0xda, 0x3a, 0x56, 0x63, 0x29, 0x1b, 0x13, 0x1e, 0x76, 0x4b, 0xeb, + 0x58, 0x8d, 0xeb, 0x7f, 0x49, 0x43, 0x21, 0x76, 0x33, 0xf4, 0x01, 0x6c, 0x75, 0xfb, 0x0f, 0xef, + 0x37, 0x8f, 0xba, 0x0f, 0x0f, 0x07, 0xed, 0xce, 0xe7, 0xcd, 0x47, 0xf7, 0x8f, 0xca, 0xa9, 0xca, + 0xad, 0xa7, 0x67, 0xb5, 0xed, 0xf9, 0x8b, 0x16, 0xc1, 0xdb, 0xf4, 0x98, 0x4c, 0x5d, 0xb1, 0xc8, + 0xea, 0xe1, 0x87, 0x07, 0x9d, 0x7e, 0xbf, 0x9c, 0x5e, 0xc5, 0xea, 0x71, 0x66, 0xd1, 0x20, 0x40, + 0xfb, 0x50, 0x9e, 0xb3, 0xee, 0x3e, 0xe9, 0x75, 0xf0, 0xe3, 0xf2, 0x5a, 0xe5, 0xe6, 0xd3, 0xb3, + 0x9a, 0xf9, 0x2c, 0xe9, 0xee, 0xcc, 0xa7, 0xfc, 0xb1, 0x6e, 0x46, 0xff, 0x91, 0x86, 0x62, 0xb2, + 0x03, 0x40, 0x07, 0x61, 0xdd, 0xaf, 0x0e, 0x60, 0x73, 0x7f, 0xef, 0xaa, 0x8e, 0x41, 0x55, 0x11, + 0xee, 0x54, 0xea, 0x7d, 0xc0, 0x6c, 0x8a, 0x15, 0x19, 0x7d, 0x00, 0x1b, 0x3e, 0xe3, 0x22, 0x7a, + 0x6f, 0x97, 0x3f, 0x45, 0x8c, 0x47, 0x25, 0x59, 0x08, 0xae, 0x8f, 0x61, 0x73, 0x51, 0x1b, 0xba, + 0x0d, 0xeb, 0x8f, 0xbb, 0xbd, 0x72, 0xaa, 0x72, 0xe3, 0xe9, 0x59, 0xed, 0xf5, 0xc5, 0x8f, 0x8f, + 0x1d, 0x2e, 0xa6, 0xc4, 0xed, 0xf6, 0xd0, 0x7b, 0xb0, 0xd1, 0x3e, 0xec, 0x63, 0x5c, 0x4e, 0x57, + 0x76, 0x9e, 0x9e, 0xd5, 0x6e, 0x2c, 0xe2, 0xe4, 0x27, 0x36, 0xf5, 0x6c, 0xcc, 0x86, 0x71, 0x0b, + 0xfe, 0xcf, 0x35, 0x30, 0x74, 0x19, 0xf2, 0xb2, 0xff, 0xd2, 0x94, 0xc2, 0x92, 0x36, 0xca, 0x9e, + 0x6b, 0x57, 0x56, 0xb6, 0xc5, 0x90, 0xa0, 0x23, 0xe3, 0x0d, 0x28, 0x3a, 0xfe, 0xe9, 0x87, 0x03, + 0xea, 0x91, 0xa1, 0xab, 0xbb, 0xf1, 0x3c, 0x36, 0xa4, 0xac, 0x13, 0x8a, 0x64, 0x61, 0xe1, 0x78, + 0x82, 0x72, 0x4f, 0xf7, 0xd9, 0x79, 0x1c, 0xcf, 0xd1, 0x67, 0x90, 0x71, 0x7c, 0x32, 0xd1, 0xe5, + 0xf8, 0xd2, 0x1d, 0x74, 0x7b, 0xcd, 0x07, 0x3a, 0x72, 0x5b, 0xf9, 0x8b, 0xf3, 0x9d, 0x8c, 0x14, + 0x60, 0x45, 0x43, 0xd5, 0xa8, 0x57, 0x92, 0x2b, 0xa9, 0x62, 0x24, 0x8f, 0x13, 0x12, 0x19, 0x7d, + 0x8e, 0x37, 0xe2, 0x34, 0x08, 0x54, 0x59, 0x92, 0xc7, 0xd1, 0x14, 0x55, 0x20, 0xa7, 0xeb, 0x6a, + 0xd5, 0x62, 0x15, 0x64, 0xb7, 0xa2, 0x05, 0xad, 0x12, 0x18, 0xe1, 0x69, 0x0c, 0x8e, 0x39, 0x9b, + 0xd4, 0xff, 0x95, 0x01, 0xe3, 0xc0, 0x9d, 0x06, 0x42, 0xd7, 0x6c, 0x2f, 0xed, 0xf0, 0x9f, 0xc0, + 0x16, 0x51, 0x7f, 0x7d, 0x88, 0x27, 0x1f, 0x79, 0xd5, 0xae, 0xe8, 0x0b, 0xb8, 0xbd, 0x54, 0x5d, + 0x0c, 0x0e, 0x5b, 0x9b, 0x56, 0x56, 0xea, 0x34, 0xd3, 0xb8, 0x4c, 0x2e, 0x7d, 0x41, 0x7d, 0x28, + 0x31, 0x6e, 0x8d, 0x69, 0x20, 0xc2, 0xd2, 0x40, 0xff, 0x25, 0x59, 0xfa, 0xff, 0xec, 0x61, 0x12, + 0xa8, 0x5f, 0xc4, 0xd0, 0xda, 0x45, 0x1d, 0xe8, 0x23, 0xc8, 0x70, 0x72, 0x1c, 0xb5, 0x5e, 0x4b, + 0x83, 0x04, 0x93, 0x63, 0xb1, 0xa0, 0x42, 0x31, 0xd0, 0x17, 0x00, 0xb6, 0x13, 0xf8, 0x44, 0x58, + 0x63, 0xca, 0xf5, 0x65, 0x2f, 0xdd, 0x62, 0x3b, 0x46, 0x2d, 0x68, 0x49, 0xb0, 0xd1, 0x3d, 0x28, + 0x58, 0x24, 0x72, 0xd7, 0xec, 0xea, 0x5f, 0x47, 0x07, 0x4d, 0xad, 0xa2, 0x2c, 0x55, 0x5c, 0x9c, + 0xef, 0xe4, 0x23, 0x09, 0xce, 0x5b, 0x44, 0xbb, 0xef, 0x3d, 0x28, 0x09, 0x12, 0x9c, 0x0c, 0xec, + 0x30, 0x9d, 0x85, 0x6e, 0xb2, 0xe2, 0x85, 0x97, 0x1d, 0xba, 0x4e, 0x7b, 0xd1, 0x75, 0x16, 0x45, + 0x42, 0x86, 0x7e, 0x01, 0x5b, 0xd4, 0xb3, 0xf8, 0x4c, 0x39, 0x6b, 0x64, 0x61, 0x7e, 0xf5, 0x66, + 0x3b, 0x31, 0x78, 0x61, 0xb3, 0x65, 0x7a, 0x49, 0x5e, 0xff, 0x5b, 0x1a, 0x20, 0x2c, 0xa9, 0x5e, + 0xae, 0x03, 0x22, 0xc8, 0xd8, 0x44, 0x10, 0xe5, 0x73, 0x45, 0xac, 0xc6, 0xe8, 
0x13, 0x00, 0x41, + 0x27, 0xbe, 0x4c, 0xbd, 0xde, 0x48, 0xbb, 0xcd, 0xf3, 0xd2, 0x41, 0x02, 0x8d, 0xf6, 0x21, 0xab, + 0x1b, 0xe4, 0xcc, 0x95, 0x3c, 0x8d, 0xac, 0xff, 0x31, 0x0d, 0x10, 0x6e, 0xf3, 0x7f, 0x7a, 0x6f, + 0xad, 0xdb, 0xdf, 0x7c, 0x5f, 0x4d, 0x7d, 0xf7, 0x7d, 0x35, 0xf5, 0xdb, 0x8b, 0x6a, 0xfa, 0x9b, + 0x8b, 0x6a, 0xfa, 0xdb, 0x8b, 0x6a, 0xfa, 0xef, 0x17, 0xd5, 0xf4, 0xef, 0x7f, 0xa8, 0xa6, 0xbe, + 0xfd, 0xa1, 0x9a, 0xfa, 0xee, 0x87, 0x6a, 0x6a, 0x98, 0x55, 0x45, 0xf9, 0x8f, 0xfe, 0x1d, 0x00, + 0x00, 0xff, 0xff, 0x74, 0x9e, 0x83, 0x44, 0x31, 0x18, 0x00, 0x00, } func (m *NodeSpec) Copy() *NodeSpec { @@ -2010,6 +2055,14 @@ func (m *ContainerSpec) CopyFrom(src interface{}) { copy(m.CapabilityDrop, o.CapabilityDrop) } + if o.Ulimits != nil { + m.Ulimits = make([]*ContainerSpec_Ulimit, len(o.Ulimits)) + for i := range m.Ulimits { + m.Ulimits[i] = &ContainerSpec_Ulimit{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Ulimits[i], o.Ulimits[i]) + } + } + } func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions { @@ -2057,6 +2110,21 @@ func (m *ContainerSpec_DNSConfig) CopyFrom(src interface{}) { } +func (m *ContainerSpec_Ulimit) Copy() *ContainerSpec_Ulimit { + if m == nil { + return nil + } + o := &ContainerSpec_Ulimit{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec_Ulimit) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec_Ulimit) + *m = *o +} + func (m *EndpointSpec) Copy() *EndpointSpec { if m == nil { return nil @@ -3025,6 +3093,20 @@ func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if len(m.Ulimits) > 0 { + for _, msg := range m.Ulimits { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -3117,6 +3199,40 @@ func (m *ContainerSpec_DNSConfig) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ContainerSpec_Ulimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerSpec_Ulimit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Soft != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Soft)) + } + if m.Hard != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Hard)) + } + return i, nil +} + func (m *EndpointSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3851,6 +3967,12 @@ func (m *ContainerSpec) Size() (n int) { n += 2 + l + sovSpecs(uint64(l)) } } + if len(m.Ulimits) > 0 { + for _, e := range m.Ulimits { + l = e.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + } return n } @@ -3894,6 +4016,25 @@ func (m *ContainerSpec_DNSConfig) Size() (n int) { return n } +func (m *ContainerSpec_Ulimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Soft != 0 { + n += 1 + sovSpecs(uint64(m.Soft)) + } + if m.Hard != 0 { + n += 1 + sovSpecs(uint64(m.Hard)) + } + return n +} + func (m *EndpointSpec) Size() (n int) { if m == nil { return 0 @@ -4276,6 +4417,7 @@ func (this *ContainerSpec) String() string { `Sysctls:` + mapStringForSysctls + `,`, `CapabilityAdd:` + 
fmt.Sprintf("%v", this.CapabilityAdd) + `,`, `CapabilityDrop:` + fmt.Sprintf("%v", this.CapabilityDrop) + `,`, + `Ulimits:` + strings.Replace(fmt.Sprintf("%v", this.Ulimits), "ContainerSpec_Ulimit", "ContainerSpec_Ulimit", 1) + `,`, `}`, }, "") return s @@ -4302,6 +4444,18 @@ func (this *ContainerSpec_DNSConfig) String() string { }, "") return s } +func (this *ContainerSpec_Ulimit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSpec_Ulimit{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Soft:` + fmt.Sprintf("%v", this.Soft) + `,`, + `Hard:` + fmt.Sprintf("%v", this.Hard) + `,`, + `}`, + }, "") + return s +} func (this *EndpointSpec) String() string { if this == nil { return "nil" @@ -6985,6 +7139,40 @@ func (m *ContainerSpec) Unmarshal(dAtA []byte) error { } m.CapabilityDrop = append(m.CapabilityDrop, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ulimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSpecs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ulimits = append(m.Ulimits, &ContainerSpec_Ulimit{}) + if err := m.Ulimits[len(m.Ulimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipSpecs(dAtA[iNdEx:]) @@ -7243,6 +7431,129 @@ func (m *ContainerSpec_DNSConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *ContainerSpec_Ulimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ulimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ulimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpecs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Soft", wireType) + } + m.Soft = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Soft |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + m.Hard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hard |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *EndpointSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/docker/swarmkit/api/specs.proto b/vendor/github.com/docker/swarmkit/api/specs.proto index 5624c88f0f..80e0e52378 100644 --- a/vendor/github.com/docker/swarmkit/api/specs.proto +++ b/vendor/github.com/docker/swarmkit/api/specs.proto @@ -358,8 +358,18 @@ message ContainerSpec { // CapabilityAdd sets the list of capabilities to add to the default capability list repeated string capability_add = 27; - // CapabilityAdd sets the list of capabilities to drop from the default capability list + // CapabilityDrop sets the list of capabilities to drop from the default capability list repeated string capability_drop = 28; + + message Ulimit { + string name = 1; + int64 soft = 2; + int64 hard = 3; + } + + // Ulimits defines the list of ulimits to set in the container. This option + // is equivalent to passing --ulimit to docker run. + repeated Ulimit ulimits = 29; } // EndpointSpec defines the properties that can be configured to diff --git a/vendor/github.com/golang/protobuf/go.mod b/vendor/github.com/golang/protobuf/go.mod index de28f6f0aa..aa055824d8 100644 --- a/vendor/github.com/golang/protobuf/go.mod +++ b/vendor/github.com/golang/protobuf/go.mod @@ -1,3 +1,3 @@ module github.com/golang/protobuf -go 1.12 +go 1.9 diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index 78ee523349..7b0ad1ad86 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -102,7 +102,8 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // type Any struct { // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent // the fully qualified name of the type (as in // `path/google.protobuf.Duration`). The name should be in a canonical form // (e.g., leading "." is not accepted). 
@@ -181,7 +182,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } +func init() { + proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) +} var fileDescriptor_b53526c13ae22eb4 = []byte{ // 185 bytes of a gzipped FileDescriptorProto diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 4932942558..c9be854167 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -121,7 +121,8 @@ option objc_class_prefix = "GPB"; // message Any { // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent // the fully qualified name of the type (as in // `path/google.protobuf.Duration`). The name should be in a canonical form // (e.g., leading "." is not accepted). diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 0d681ee21a..58b0786990 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -41,7 +41,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // if (duration.seconds < 0 && duration.nanos > 0) { // duration.seconds += 1; // duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { +// } else if (duration.seconds > 0 && duration.nanos < 0) { // duration.seconds -= 1; // duration.nanos += 1000000000; // } @@ -142,7 +142,9 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) +} var fileDescriptor_23597b2ebd7ac6c5 = []byte{ // 190 bytes of a gzipped FileDescriptorProto diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto index 975fce41aa..99cb102c35 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -61,7 +61,7 @@ option objc_class_prefix = "GPB"; // if (duration.seconds < 0 && duration.nanos > 0) { // duration.seconds += 1; // duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { +// } else if (duration.seconds > 0 && duration.nanos < 0) { // duration.seconds -= 1; // duration.nanos += 1000000000; // } @@ -101,7 +101,6 @@ option objc_class_prefix = "GPB"; // // message Duration { - // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. 
Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 31cd846de9..7a3b1e40e2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -20,17 +20,19 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. // // # Examples // @@ -91,12 +93,14 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // 01:30 UTC on January 15, 2017. // // In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) // method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. 
// // @@ -160,7 +164,9 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) +} var fileDescriptor_292007bbfe81227e = []byte{ // 191 bytes of a gzipped FileDescriptorProto diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index eafb3fa03a..cd357864a9 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -40,17 +40,19 @@ option java_outer_classname = "TimestampProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. // // # Examples // @@ -111,17 +113,18 @@ option objc_class_prefix = "GPB"; // 01:30 UTC on January 15, 2017. // // In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) // method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { - // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index c9ba647073..782a34b22a 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -435,8 +435,7 @@ func Vars(r *http.Request) map[string]string { // CurrentRoute returns the matched route for the current request, if any. // This only works when called inside the handler of the matched route // because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. +// after the handler returns. func CurrentRoute(r *http.Request) *Route { if rv := r.Context().Value(routeKey); rv != nil { return rv.(*Route) diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 96dd94ad13..0144842bb2 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -325,6 +325,12 @@ func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. if v.host != nil { host := getHost(req) + if v.host.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } matches := v.host.regexp.FindStringSubmatchIndex(host) if len(matches) > 0 { extractVars(host, matches, v.host.varsN, m.Vars) diff --git a/vendor/github.com/opencontainers/runc/README.md b/vendor/github.com/opencontainers/runc/README.md index a806f2729a..aeef59895b 100644 --- a/vendor/github.com/opencontainers/runc/README.md +++ b/vendor/github.com/opencontainers/runc/README.md @@ -3,6 +3,7 @@ [![Build Status](https://travis-ci.org/opencontainers/runc.svg?branch=master)](https://travis-ci.org/opencontainers/runc) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runc)](https://goreportcard.com/report/github.com/opencontainers/runc) [![GoDoc](https://godoc.org/github.com/opencontainers/runc?status.svg)](https://godoc.org/github.com/opencontainers/runc) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/588/badge)](https://bestpractices.coreinfrastructure.org/projects/588) ## Introduction @@ -18,22 +19,23 @@ You can find official releases of `runc` on the [release](https://github.com/ope Currently, the following features are not considered to be production-ready: -* Support for cgroup v2 +* [Support for cgroup v2](./docs/cgroup-v2.md) ## Security -The reporting process and disclosure communications are outlined in [/org/security](https://github.com/opencontainers/org/blob/master/security/). +The reporting process and disclosure communications are outlined [here](https://github.com/opencontainers/org/blob/master/SECURITY.md). + +### Security Audit +A third party security audit was performed by Cure53, you can see the full report [here](https://github.com/opencontainers/runc/blob/master/docs/Security-Audit.pdf). ## Building `runc` currently supports the Linux platform with various architecture support. 
-It must be built with Go version 1.6 or higher in order for some features to function properly. +It must be built with Go version 1.13 or higher. In order to enable seccomp support you will need to install `libseccomp` on your platform. > e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu -Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make. - ```bash # create a 'github.com/opencontainers' in your GOPATH/src cd github.com/opencontainers @@ -58,20 +60,22 @@ sudo make install #### Build Tags -`runc` supports optional build tags for compiling support of various features. -To add build tags to the make option the `BUILDTAGS` variable must be set. +`runc` supports optional build tags for compiling support of various features, +with some of them enabled by default (see `BUILDTAGS` in top-level `Makefile`). + +To change build tags from the default, set the `BUILDTAGS` variable for make, +e.g. ```bash make BUILDTAGS='seccomp apparmor' ``` -| Build Tag | Feature | Dependency | -|-----------|------------------------------------|-------------| -| seccomp | Syscall filtering | libseccomp | -| selinux | selinux process and mount labeling | | -| apparmor | apparmor profile support | | -| ambient | ambient capability support | kernel 4.3 | -| nokmem | disable kernel memory account | | +| Build Tag | Feature | Enabled by default | Dependency | +|-----------|------------------------------------|--------------------|------------| +| seccomp | Syscall filtering | yes | libseccomp | +| selinux | selinux process and mount labeling | yes | | +| apparmor | apparmor profile support | yes | | +| nokmem | disable kernel memory accounting | no | | ### Running the test suite @@ -97,17 +101,30 @@ You can run a specific integration test by setting the `TESTPATH` variable. # make test TESTPATH="/checkpoint.bats" ``` -You can run a test in your proxy environment by setting `DOCKER_BUILD_PROXY` and `DOCKER_RUN_PROXY` variables. +You can run a specific rootless integration test by setting the `ROOTLESS_TESTPATH` variable. ```bash -# make test DOCKER_BUILD_PROXY="--build-arg HTTP_PROXY=http://yourproxy/" DOCKER_RUN_PROXY="-e HTTP_PROXY=http://yourproxy/" +# make test ROOTLESS_TESTPATH="/checkpoint.bats" +``` + +You can run a test using your container engine's flags by setting `CONTAINER_ENGINE_BUILD_FLAGS` and `CONTAINER_ENGINE_RUN_FLAGS` variables. + +```bash +# make test CONTAINER_ENGINE_BUILD_FLAGS="--build-arg http_proxy=http://yourproxy/" CONTAINER_ENGINE_RUN_FLAGS="-e http_proxy=http://yourproxy/" ``` ### Dependencies Management -`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependencies management. -Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update -new dependencies. +`runc` uses [Go Modules](https://github.com/golang/go/wiki/Modules) for dependencies management. +Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for how to add or update +new dependencies. When updating dependencies, be sure that you are running Go `1.14` or newer. + +``` +# Update vendored dependencies +make vendor +# Verify all dependencies +make verify-dependencies +``` ## Using runc @@ -275,6 +292,9 @@ PIDFile=/run/mycontainerid.pid WantedBy=multi-user.target ``` +#### cgroup v2 +See [`./docs/cgroup-v2.md`](./docs/cgroup-v2.md). + ## License The code and docs are released under the [Apache 2.0 license](LICENSE). 
diff --git a/vendor/github.com/opencontainers/runc/go.mod b/vendor/github.com/opencontainers/runc/go.mod new file mode 100644 index 0000000000..fcf068dfae --- /dev/null +++ b/vendor/github.com/opencontainers/runc/go.mod @@ -0,0 +1,26 @@ +module github.com/opencontainers/runc + +go 1.14 + +require ( + github.com/checkpoint-restore/go-criu/v4 v4.1.0 + github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775 + github.com/containerd/console v1.0.0 + github.com/coreos/go-systemd/v22 v22.1.0 + github.com/cyphar/filepath-securejoin v0.2.2 + github.com/docker/go-units v0.4.0 + github.com/godbus/dbus/v5 v5.0.3 + github.com/golang/protobuf v1.4.2 + github.com/moby/sys/mountinfo v0.1.3 + github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976 + github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6 + github.com/opencontainers/selinux v1.6.0 + github.com/pkg/errors v0.9.1 + github.com/seccomp/libseccomp-golang v0.9.1 + github.com/sirupsen/logrus v1.6.0 + github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 + // NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092 + github.com/urfave/cli v1.22.1 + github.com/vishvananda/netlink v1.1.0 + golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 +) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/README.md b/vendor/github.com/opencontainers/runc/libcontainer/README.md index a791ca2d24..6803ef56c5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/README.md +++ b/vendor/github.com/opencontainers/runc/libcontainer/README.md @@ -155,8 +155,7 @@ config := &configs.Config{ Parent: "system", Resources: &configs.Resources{ MemorySwappiness: nil, - AllowAllDevices: nil, - AllowedDevices: configs.DefaultAllowedDevices, + Devices: specconv.AllowedDevices, }, }, MaskPaths: []string{ @@ -166,7 +165,7 @@ config := &configs.Config{ ReadonlyPaths: []string{ "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus", }, - Devices: configs.DefaultAutoCreatedDevices, + Devices: specconv.AllowedDevices, Hostname: "testing", Mounts: []*configs.Mount{ { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c index ad10f14067..24cc8c6e16 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c @@ -1,7 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later /* * Copyright (C) 2019 Aleksa Sarai * Copyright (C) 2019 SUSE LLC * + * This work is dual licensed under the following licenses. You may use, + * redistribute, and/or modify the work under the conditions of either (or + * both) licenses. + * + * === Apache-2.0 === + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,6 +20,23 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + * + * === LGPL-2.1-or-later === + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. If not, see + * . + * */ #define _GNU_SOURCE @@ -95,8 +119,10 @@ static int is_self_cloned(void) struct statfs fsbuf = {}; fd = open("/proc/self/exe", O_RDONLY|O_CLOEXEC); - if (fd < 0) + if (fd < 0) { + fprintf(stderr, "you have no read access to runc binary file\n"); return -ENOTRECOVERABLE; + } /* * Is the binary a fully-sealed memfd? We don't need CLONED_BINARY_ENV for diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c index 072656831d..a33f2fcc37 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c @@ -714,12 +714,12 @@ void nsexec(void) * ready, so we can receive all possible error codes * generated by children. */ + syncfd = sync_child_pipe[1]; + close(sync_child_pipe[0]); + while (!ready) { enum sync_t s; - syncfd = sync_child_pipe[1]; - close(sync_child_pipe[0]); - if (read(syncfd, &s, sizeof(s)) != sizeof(s)) bail("failed to sync with child: next state"); @@ -789,13 +789,13 @@ void nsexec(void) /* Now sync with grandchild. */ + syncfd = sync_grandchild_pipe[1]; + close(sync_grandchild_pipe[0]); + ready = false; while (!ready) { enum sync_t s; - syncfd = sync_grandchild_pipe[1]; - close(sync_grandchild_pipe[0]); - s = SYNC_GRANDCHILD; if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { kill(child, SIGKILL); diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 7b912bbf8b..4b89dad737 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -60,7 +60,7 @@ type Group struct { // groupFromOS converts an os/user.(*Group) to local Group // -// (This does not include Pass, Shell or Gecos) +// (This does not include Pass or List) func groupFromOS(g *user.Group) (Group, error) { newGroup := Group{ Name: g.Name, @@ -162,10 +162,6 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - line := strings.TrimSpace(s.Text()) if line == "" { continue @@ -183,6 +179,9 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } @@ -221,10 +220,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - text := s.Text() if text == "" { continue @@ -242,6 +237,9 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } @@ -532,10 +530,6 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - line := strings.TrimSpace(s.Text()) if line == "" { continue @@ -549,6 +543,9 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) 
bool) ([]SubID, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } @@ -586,10 +583,6 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - line := strings.TrimSpace(s.Text()) if line == "" { continue @@ -603,6 +596,9 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } diff --git a/vendor/github.com/opencontainers/runc/vendor.conf b/vendor/github.com/opencontainers/runc/vendor.conf deleted file mode 100644 index dd51785e6b..0000000000 --- a/vendor/github.com/opencontainers/runc/vendor.conf +++ /dev/null @@ -1,31 +0,0 @@ -# OCI runtime-spec. When updating this, make sure you use a version tag rather -# than a commit ID so it's much more obvious what version of the spec we are -# using. -github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db - -# Core libcontainer functionality. -github.com/checkpoint-restore/go-criu 17b0214f6c48980c45dc47ecb0cfd6d9e02df723 # v3.11 -github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7 -github.com/opencontainers/selinux 5215b1806f52b1fcc2070a8826c542c9d33cd3cf # v1.3.0 (+ CVE-2019-16884) -github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1 -github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1 -github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 -github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270 - -# systemd integration. -github.com/coreos/go-systemd 95778dfbb74eb7e4dbaf43bf7d71809650ef8076 # v19 -github.com/godbus/dbus 2ff6f7ffd60f0f2410b3105864bdd12c7894f844 # v5.0.1 -github.com/golang/protobuf 925541529c1fa6821df4e44ce2723319eb2be768 # v1.0.0 - -# Command-line interface. -github.com/cyphar/filepath-securejoin a261ee33d7a517f054effbf451841abaafe3e0fd # v0.2.2 -github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3 -github.com/urfave/cli cfb38830724cc34fedffe9a2a29fb54fa9169cd1 # v1.20.0 -golang.org/x/sys 9eafafc0a87e0fd0aeeba439a4573537970c44c7 https://github.com/golang/sys - -# console dependencies -github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f -github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1 - -# ebpf dependencies -github.com/cilium/ebpf 95b36a581eed7b0f127306ed1d16cc0ddc06cf67 diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md index 557eacf5a8..9bab32a215 100644 --- a/vendor/github.com/prometheus/client_golang/README.md +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -1,12 +1,34 @@ # Prometheus Go client library [![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) +[![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang) This is the [Go](http://golang.org) client library for [Prometheus](http://prometheus.io). It has two separate parts, one for instrumenting application code, and one for creating clients that talk to the Prometheus HTTP API. 
+__This library requires Go1.9 or later.__ The minimum required patch releases for older Go versions are Go1.9.7 and Go1.10.3. + +## Important note about releases and stability + +This repository generally follows [Semantic +Versioning](https://semver.org/). However, the API client in +prometheus/client_golang/api/… is still considered experimental. Breaking +changes of the API client will _not_ trigger a new major release. The same is +true for selected other new features explicitly marked as **EXPERIMENTAL** in +CHANGELOG.md. + +Features that require breaking changes in the stable parts of the repository +are being batched up and tracked in the [v2 +milestone](https://github.com/prometheus/client_golang/milestone/2). The v2 +development happens in a [separate +branch](https://github.com/prometheus/client_golang/tree/dev-v2) for the time +being. v2 releases off that branch will happen once sufficient stability is +reached. In view of the widespread use of this repository, v1 and v2 will +coexist for a while to enable a convenient transition. + ## Instrumenting applications [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) @@ -14,8 +36,8 @@ Prometheus HTTP API. The [`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) contains the instrumentation library. See the -[best practices section](http://prometheus.io/docs/practices/naming/) of the -Prometheus documentation to learn more about instrumenting applications. +[guide](https://prometheus.io/docs/guides/go-application/) on the Prometheus +website to learn more about instrumenting applications. The [`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) @@ -23,13 +45,14 @@ contains simple examples of instrumented code. ## Client for the Prometheus HTTP API -[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api) The [`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) contains the client for the [Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you -to write Go applications that query time series data from a Prometheus server. +to write Go applications that query time series data from a Prometheus +server. It is still in alpha stage. ## Where is `model`, `extraction`, and `text`? 
diff --git a/vendor/github.com/prometheus/client_golang/go.mod b/vendor/github.com/prometheus/client_golang/go.mod new file mode 100644 index 0000000000..61b13edc7a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/go.mod @@ -0,0 +1,18 @@ +module github.com/prometheus/client_golang + +require ( + github.com/beorn7/perks v1.0.1 + github.com/cespare/xxhash/v2 v2.1.1 + github.com/golang/protobuf v1.4.0 + github.com/json-iterator/go v1.1.9 + github.com/kr/pretty v0.1.0 // indirect + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.9.1 + github.com/prometheus/procfs v0.0.11 + github.com/stretchr/testify v1.4.0 // indirect + golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/yaml.v2 v2.2.5 // indirect +) + +go 1.11 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 0000000000..288f0e8548 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go new file mode 100644 index 0000000000..6609e2877c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.12 + +package prometheus + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. 
+func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index 623d3d83fe..1e839650d4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -29,27 +29,72 @@ type Collector interface { // collected by this Collector to the provided channel and returns once // the last descriptor has been sent. The sent descriptors fulfill the // consistency and uniqueness requirements described in the Desc - // documentation. (It is valid if one and the same Collector sends - // duplicate descriptors. Those duplicates are simply ignored. However, - // two different Collectors must not send duplicate descriptors.) This - // method idempotently sends the same descriptors throughout the - // lifetime of the Collector. If a Collector encounters an error while - // executing this method, it must send an invalid descriptor (created - // with NewInvalidDesc) to signal the error to the registry. + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. Describe(chan<- *Desc) // Collect is called by the Prometheus registry when collecting // metrics. The implementation sends each collected metric via the // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by - // Describe. Returned metrics that share the same descriptor must differ - // in their variable label values. This method may be called - // concurrently and must therefore be implemented in a concurrency safe - // way. Blocking occurs at the expense of total performance of rendering - // all registered metrics. Ideally, Collector implementations support - // concurrent readers. + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. Collect(chan<- Metric) } +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. 
+// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + // selfCollector implements Collector for a single Metric so that the Metric // collects itself. Add it as an anonymous field to a struct that implements // Metric, and call init with the Metric itself as an argument. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index ee37949ada..0e1b48c03f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -15,6 +15,11 @@ package prometheus import ( "errors" + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" ) // Counter is a Metric that represents a single numerical value that only ever @@ -30,26 +35,42 @@ type Counter interface { Metric Collector - // Set is used to set the Counter to an arbitrary value. It is only used - // if you have to transfer a value from an external counter into this - // Prometheus metric. Do not use it for regular handling of a - // Prometheus counter (as it can be used to break the contract of - // monotonically increasing values). - // - // Deprecated: Use NewConstMetric to create a counter for an external - // value. A Counter should never be set. - Set(float64) - // Inc increments the counter by 1. + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. Inc() // Add adds the given value to the counter. It panics if the value is < // 0. Add(float64) } +// ExemplarAdder is implemented by Counters that offer the option of adding a +// value to the Counter together with an exemplar. Its AddWithExemplar method +// works like the Add method of the Counter interface but also replaces the +// currently saved exemplar (if any) with a new one, created from the provided +// value, the current time as timestamp, and the provided labels. Empty Labels +// will lead to a valid (label-less) exemplar. But if Labels is nil, the current +// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any +// of the provided labels are invalid, or if the provided labels contain more +// than 64 runes in total. +type ExemplarAdder interface { + AddWithExemplar(value float64, exemplar Labels) +} + // CounterOpts is an alias for Opts. See there for doc comments. 
type CounterOpts Opts // NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation also implements ExemplarAdder. It is safe to +// perform the corresponding type assertion. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. func NewCounter(opts CounterOpts) Counter { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -57,20 +78,83 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} result.init(result) // Init self-collection. return result } type counter struct { - value + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair + exemplar atomic.Value // Containing nil or a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. +} + +func (c *counter) Desc() *Desc { + return c.desc } func (c *counter) Add(v float64) { if v < 0 { panic(errors.New("counter cannot decrease in value")) } - c.value.Add(v) + + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) AddWithExemplar(v float64, e Labels) { + c.Add(v) + c.updateExemplar(v, e) +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + var exemplar *dto.Exemplar + if e := c.exemplar.Load(); e != nil { + exemplar = e.(*dto.Exemplar) + } + + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) +} + +func (c *counter) updateExemplar(v float64, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, c.now(), l) + if err != nil { + panic(err) + } + c.exemplar.Store(e) } // CounterVec is a Collector that bundles a set of Counters that all share the @@ -78,16 +162,12 @@ func (c *counter) Add(v float64) { // if you want to count the same thing partitioned by various dimensions // (e.g. number of HTTP requests, partitioned by response code and // method). Create instances with NewCounterVec. -// -// CounterVec embeds MetricVec. See there for a full list of methods with -// detailed documentation. type CounterVec struct { - *MetricVec + *metricVec } // NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. 
At least one label name must be -// provided. +// partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -96,34 +176,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { opts.ConstLabels, ) return &CounterVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. return result }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Counter and not a -// Metric so that no type conversion is required. -func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { return metric.(Counter), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Counter and not a Metric so that no -// type conversion is required. -func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. 
+// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { return metric.(Counter), err } @@ -131,18 +239,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.MetricVec.WithLabelValues(lvs...).(Counter) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *CounterVec) With(labels Labels) Counter { - return m.MetricVec.With(labels).(Counter) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } // CounterFunc is a Counter whose value is determined at collect time by calling a @@ -162,6 +309,8 @@ type CounterFunc interface { // provided function must be concurrency-safe. The function should also honor // the contract for a Counter (values only go up, not down), but compliance will // not be checked. +// +// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. 
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 77f4b30e84..e3232d79f4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -16,33 +16,16 @@ package prometheus import ( "errors" "fmt" - "regexp" "sort" "strings" + "github.com/cespare/xxhash/v2" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) -var ( - metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) - labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - // Desc is the descriptor used by every Prometheus Metric. It is essentially // the immutable meta-data of a Metric. The normal Metric implementations // included in this package manage their Desc under the hood. Users only have to @@ -78,32 +61,27 @@ type Desc struct { // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 - // err is an error that occured during construction. It is reported on + // err is an error that occurred during construction. It is reported on // registration time. err error } // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName and help must not be empty. +// be nil if no such labels should be set. fqName must not be empty. // // variableLabels only contain the label names. Their label values are variable // and therefore not part of the Desc. (They are managed within the Metric.) // // For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Opts documentation for the implications of -// constant labels. +// specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, variableLabels: variableLabels, } - if help == "" { - d.err = errors.New("empty help string") - return d - } - if !metricNameRE.MatchString(fqName) { + if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } @@ -116,7 +94,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // First add only the const label names and sort them... 
for labelName := range constLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, labelName) @@ -127,12 +105,18 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * for _, labelName := range labelNames { labelValues = append(labelValues, constLabels[labelName]) } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. for _, labelName := range variableLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, "$"+labelName) @@ -142,24 +126,25 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("duplicate label names") return d } - vh := hashNew() + + xxh := xxhash.New() for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) + xxh.WriteString(val) + xxh.Write(separatorByteSlice) } - d.id = vh + d.id = xxh.Sum64() // Sort labelNames so that order doesn't matter for the hash. sort.Strings(labelNames) // Now hash together (in this order) the help string and the sorted // label names. - lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) + xxh.Reset() + xxh.WriteString(help) + xxh.Write(separatorByteSlice) for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) + xxh.WriteString(labelName) + xxh.Write(separatorByteSlice) } - d.dimHash = lh + d.dimHash = xxh.Sum64() d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) for n, v := range constLabels { @@ -168,7 +153,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(LabelPairSorter(d.constLabelPairs)) + sort.Sort(labelPairSorter(d.constLabelPairs)) return d } @@ -198,8 +183,3 @@ func (d *Desc) String() string { d.variableLabels, ) } - -func checkLabelName(l string) bool { - return labelNameRE.MatchString(l) && - !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index b15a2d3b98..98450125d6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -11,13 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus provides metrics primitives to instrument code for -// monitoring. It also offers a registry for metrics. Sub-packages allow to -// expose the registered metrics via HTTP (package promhttp) or push them to a -// Pushgateway (package push). +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. 
Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. // // All exported functions and methods are safe to be used concurrently unless -//specified otherwise. +// specified otherwise. // // A Basic Example // @@ -26,6 +28,7 @@ // package main // // import ( +// "log" // "net/http" // // "github.com/prometheus/client_golang/prometheus" @@ -59,7 +62,7 @@ // // The Handler function provides a default handler to expose metrics // // via an HTTP server. "/metrics" is the usual endpoint for that. // http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":8080", nil) +// log.Fatal(http.ListenAndServe(":8080", nil)) // } // // @@ -69,34 +72,33 @@ // Metrics // // The number of exported identifiers in this package might appear a bit -// overwhelming. Hovever, in addition to the basic plumbing shown in the example +// overwhelming. However, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their -// vector versions for basic usage. +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. // // Above, you have already touched the Counter and the Gauge. There are two more // advanced metric types: the Summary and Histogram. A more thorough description // of those four metric types can be found in the Prometheus docs: // https://prometheus.io/docs/concepts/metric_types/ // -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. +// and HistogramVec. // // While only the fundamental metric types implement the Metric interface, both // the metrics and their vector versions implement the Collector interface. A // Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and +// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, +// and HistogramVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, -// HistogramOpts, or UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. 
// // Custom Collectors and constant Metrics // @@ -112,10 +114,23 @@ // existing numbers into Prometheus Metrics during collection. An own // implementation of the Collector interface is perfect for that. You can create // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. +// NewConstSummary (and their respective Must… versions). NewConstMetric is used +// for all metric types with just a float64 as their value: Counter, Gauge, and +// a special “type” called Untyped. Use the latter if you are not sure if the +// mirrored metric is a Counter or a Gauge. Creation of the Metric instance +// happens in the Collect method. The Describe method has to return separate +// Desc instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. Alternatively, +// you could return no Desc at all, which will mark the Collector “unchecked”. +// No checks are performed at registration time, but metric consistency will +// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the @@ -129,34 +144,34 @@ // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might -// cause. As suggested by the name, MustRegister panics if an error occurs. With -// the Register function, the error is returned and can be handled. +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data -// model. Inconsistencies are ideally detected at registration time, not at -// collect time. The former will usually be detected at start-up time of a -// program, while the latter will only happen at scrape time, possibly not even -// on the first scrape if the inconsistency only becomes relevant later. That is -// the main reason why a Collector and a Metric have to describe themselves to -// the registry. +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. 
The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. // // So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can be found in the global DefaultRegisterer variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in -// the same way on a custom registry as the global functions Register and -// Unregister on the default registry. +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. // -// There are a number of uses for custom registries: You can use registries -// with special properties, see NewPedanticRegistry. You can avoid global state, -// as it is imposed by the DefaultRegistry. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use // separate registries for testing purposes. // -// Also note that the DefaultRegistry comes registered with a Collector for Go +// Also note that the DefaultRegisterer comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. @@ -166,16 +181,19 @@ // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp -// sub-package. (The top-level functions in the prometheus package are -// deprecated.) +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// // Other Means of Exposition // -// More ways of exposing metrics can easily be added. Sending metrics to -// Graphite would be an example that will soon be implemented. +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. 
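To tie the registry discussion above together, a hedged sketch of a custom pedantic registry exposed through the promhttp sub-package; the particular setup shown is an assumption about typical usage, not something prescribed by this change:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A registry separate from prometheus.DefaultRegisterer, with extra
	// consistency checks performed at gather time.
	reg := prometheus.NewPedanticRegistry()
	reg.MustRegister(
		prometheus.NewGoCollector(),
		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
	)

	// Expose only the metrics registered with this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}

Using a dedicated registry like this avoids the global state of the default registerer, which is exactly the use case the doc comment above calls out.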
package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go index e3b67df8ac..3d383a735c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus // Inline and byte-free variant of hash/fnv's fnv64a. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 8b70e5141d..d67573f767 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -13,6 +13,14 @@ package prometheus +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + // Gauge is a Metric that represents a single numerical value that can // arbitrarily go up and down. // @@ -27,29 +35,95 @@ type Gauge interface { // Set sets the Gauge to an arbitrary value. Set(float64) - // Inc increments the Gauge by 1. + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. Inc() - // Dec decrements the Gauge by 1. + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. Dec() - // Add adds the given value to the Gauge. (The value can be - // negative, resulting in a decrease of the Gauge.) + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() } // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts // NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. func NewGauge(opts GaugeOpts) Gauge { - return newValue(NewDesc( + desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, - ), GaugeValue, 0) + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. 
http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge { // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { - *MetricVec + *metricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { opts.ConstLabels, ) return &GaugeVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, GaugeValue, 0, lvs...) + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Gauge and not a -// Metric so that no type conversion is required. -func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). 
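Stepping back to the gauge implementation above: it stores the float64 as its raw IEEE-754 bits in a uint64 and updates it with a compare-and-swap loop. A self-contained sketch of that pattern, purely illustrative and outside the library:

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// atomicFloat64 mirrors the bit-packing trick used by the gauge above: the
// float64 is kept as its bit pattern in a uint64 so sync/atomic can operate
// on it. The field must stay 64-bit aligned for atomic access.
type atomicFloat64 struct {
	bits uint64
}

func (f *atomicFloat64) Set(v float64) {
	atomic.StoreUint64(&f.bits, math.Float64bits(v))
}

func (f *atomicFloat64) Add(delta float64) {
	for {
		oldBits := atomic.LoadUint64(&f.bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
		if atomic.CompareAndSwapUint64(&f.bits, oldBits, newBits) {
			return
		}
	}
}

func (f *atomicFloat64) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&f.bits))
}

func main() {
	var g atomicFloat64
	g.Set(2.5)
	g.Add(-1.0)
	fmt.Println(g.Load()) // 1.5
}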
+// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Gauge and not a Metric so that no -// type conversion is required. -func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { return metric.(Gauge), err } @@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { - return m.MetricVec.WithLabelValues(lvs...).(Gauge) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *GaugeVec) With(labels Labels) Gauge { - return m.MetricVec.With(labels).(Gauge) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. 
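The CurryWith documentation resumes just below; first, a brief usage sketch of the GaugeVec accessors it builds on. The gauge name and labels are assumptions made for the example:

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

var queueDepth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Name: "worker_queue_depth",
	Help: "Current depth of each worker queue.",
}, []string{"datacenter", "queue"})

func main() {
	prometheus.MustRegister(queueDepth)

	// WithLabelValues panics on a label-cardinality mistake.
	queueDepth.WithLabelValues("eu-1", "ingest").Set(17)

	// GetMetricWithLabelValues returns the error instead, so it can be handled.
	g, err := queueDepth.GetMetricWithLabelValues("eu-1", "retry")
	if err != nil {
		log.Fatal(err)
	}
	g.Add(3)
}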
+// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } // GaugeFunc is a Gauge whose value is determined at collect time by calling a @@ -127,9 +273,12 @@ type GaugeFunc interface { // NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The // value reported is determined by calling the given function from within the // Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. +// concurrently. Therefore, it must be safe to call the provided function +// concurrently. +// +// NewGaugeFunc is a good way to create an “info” style metric with a constant +// value of 1. Example: +// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index abc9d4ec40..ea05cf429f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -1,34 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus import ( - "fmt" "runtime" "runtime/debug" + "sync" "time" ) type goCollector struct { - goroutines Gauge - gcDesc *Desc + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc - // metrics to describe and collect - metrics memStatsMetrics + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. } -// NewGoCollector returns a collector which exports metrics about the current -// go process. 
+// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. (The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) func NewGoCollector() Collector { return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Namespace: "go", - Name: "goroutines", - Help: "Number of goroutines that currently exist.", - }), + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the GC invocation durations.", + "A summary of the pause duration of garbage collection cycles.", nil, nil), - metrics: memStatsMetrics{ + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), @@ -48,7 +103,7 @@ func NewGoCollector() Collector { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained by system. 
Sum of all system allocations.", + "Number of bytes obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, @@ -111,12 +166,12 @@ func NewGoCollector() Collector { valType: GaugeValue, }, { desc: NewDesc( - memstatNamespace("heap_released_bytes_total"), - "Total number of heap bytes released to OS.", + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: CounterValue, + valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), @@ -213,29 +268,53 @@ func NewGoCollector() Collector { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, }, }, } } func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) + return "go_memstats_" + s } // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() + ch <- c.goroutinesDesc + ch <- c.threadsDesc ch <- c.gcDesc - - for _, i := range c.metrics { + ch <- c.goInfoDesc + for _, i := range c.msMetrics { ch <- i.desc } } // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -246,11 +325,35 @@ func (c *goCollector) Collect(ch chan<- Metric) { quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() } quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. 
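As an aside, the Collect logic here follows a fresh-or-cached pattern around the slow runtime.ReadMemStats call. A simplified standalone sketch of the same idea, with invented names and durations rather than the library's internals:

package main

import (
	"fmt"
	"time"
)

// readFreshOrCached starts the (possibly slow) read in a goroutine and waits
// up to maxWait for it. If it does not finish in time, the cached value is
// used as long as it is younger than maxAge; otherwise the call blocks until
// the fresh read completes.
func readFreshOrCached(read func() int, cached int, cachedAt time.Time, maxWait, maxAge time.Duration) int {
	done := make(chan int, 1)
	go func() { done <- read() }()

	timer := time.NewTimer(maxWait)
	defer timer.Stop()

	select {
	case v := <-done: // Fresh value arrived in time: use it.
		return v
	case <-timer.C: // Timed out: decide between cache and blocking below.
	}
	if time.Since(cachedAt) < maxAge {
		return cached // Cache is recent enough.
	}
	return <-done // Cache too old: block for the fresh value.
}

func main() {
	slow := func() int { time.Sleep(2 * time.Second); return 42 }
	fmt.Println(readFreshOrCached(slow, 7, time.Now(), time.Second, 5*time.Minute))
}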
+ c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) } } @@ -261,3 +364,33 @@ type memStatsMetrics []struct { eval func(*runtime.MemStats) float64 valType ValueType } + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 9719e8fac8..4271f438ae 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -16,8 +16,11 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" + "sync" "sync/atomic" + "time" "github.com/golang/protobuf/proto" @@ -108,8 +111,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { } // HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. type HistogramOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Histogram (created by joining these components with @@ -120,29 +124,22 @@ type HistogramOpts struct { Subsystem string Name string - // Help provides information about this Histogram. Mandatory! + // Help provides information about this Histogram. // // Metrics with the same fully-qualified name must have the same Help // string. Help string - // ConstLabels are used to attach fixed labels to this - // Histogram. Histograms with the same fully-qualified name must have the - // same label names in their ConstLabels. + // ConstLabels are used to attach fixed labels to this metric. 
Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // HistogramVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Histograms with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels // Buckets defines the buckets into which observations are counted. Each @@ -155,6 +152,10 @@ type HistogramOpts struct { // NewHistogram creates a new Histogram based on the provided HistogramOpts. It // panics if the buckets in HistogramOpts are not in strictly increasing order. +// +// The returned implementation also implements ExemplarObserver. It is safe to +// perform the corresponding type assertion. Exemplars are tracked separately +// for each bucket. func NewHistogram(opts HistogramOpts) Histogram { return newHistogram( NewDesc( @@ -169,7 +170,7 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { @@ -191,6 +192,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr desc: desc, upperBounds: opts.Buckets, labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{{}, {}}, + now: time.Now, } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -207,30 +210,60 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make counts. - h.counts = make([]uint64, len(h.upperBounds)) + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts as well as exemplars: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. return h } -type histogram struct { +type histogramCounts struct { // sumBits contains the bits of the float64 representing the sum of all // observations. sumBits and count have to go first in the struct to // guarantee alignment for atomic operations. 
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG sumBits uint64 count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 selfCollector - // Note that there is no mutex required. + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. - desc *Desc + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts upperBounds []float64 - counts []uint64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - labelPairs []*dto.LabelPair + now func() time.Time // To mock out time.Now() for testing. } func (h *histogram) Desc() *Desc { @@ -238,6 +271,89 @@ func (h *histogram) Desc() *Desc { } func (h *histogram) Observe(v float64) { + h.observe(v, h.findBucket(v)) +} + +func (h *histogram) ObserveWithExemplar(v float64, e Labels) { + i := h.findBucket(v) + h.observe(v, i) + h.updateExemplar(v, i, e) +} + +func (h *histogram) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. 
+ } + + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + his.Bucket[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + if e := h.exemplars[i].Load(); e != nil { + his.Bucket[i].Exemplar = e.(*dto.Exemplar) + } + } + // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. + if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e.(*dto.Exemplar), + } + his.Bucket = append(his.Bucket, b) + } + + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// findBucket returns the index of the bucket for the provided value, or +// len(h.upperBounds) for the +Inf bucket. +func (h *histogram) findBucket(v float64) int { // TODO(beorn7): For small numbers of buckets (<30), a linear search is // slightly faster than the binary search. If we really care, we could // switch from one search strategy to the other depending on the number @@ -247,38 +363,43 @@ func (h *histogram) Observe(v float64) { // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - if i < len(h.counts) { - atomic.AddUint64(&h.counts[i], 1) + return sort.SearchFloat64s(h.upperBounds, v) +} + +// observe is the implementation for Observe without the findBucket part. +func (h *histogram) observe(v float64, bucket int) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if bucket < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[bucket], 1) } - atomic.AddUint64(&h.count, 1) for { - oldBits := atomic.LoadUint64(&h.sumBits) + oldBits := atomic.LoadUint64(&hotCounts.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { break } } + // Increment count last as we take it as a signal that the observation + // is complete. 
+ atomic.AddUint64(&hotCounts.count, 1) } -func (h *histogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, len(h.upperBounds)) - - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) - his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) - var count uint64 - for i, upperBound := range h.upperBounds { - count += atomic.LoadUint64(&h.counts[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - } +// updateExemplar replaces the exemplar for the provided bucket. With empty +// labels, it's a no-op. It panics if any of the labels is invalid. +func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { + if l == nil { + return } - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - return nil + e, err := newExemplar(v, h.now(), l) + if err != nil { + panic(err) + } + h.exemplars[bucket].Store(e) } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -287,12 +408,11 @@ func (h *histogram) Write(out *dto.Metric) error { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewHistogramVec. type HistogramVec struct { - *MetricVec + *metricVec } // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -301,47 +421,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { opts.ConstLabels, ) return &HistogramVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + metricVec: newMetricVec(desc, func(lvs ...string) Metric { return newHistogram(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Histogram and not a -// Metric so that no type conversion is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. 
For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Histogram and not a Metric so that no -// type conversion is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { - return m.MetricVec.WithLabelValues(lvs...).(Histogram) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h } -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Histogram { - return m.MetricVec.With(labels).(Histogram) +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. 
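The CurryWith documentation continues below; as a hedged usage sketch (handler and label names are invented), currying an ObserverVec is a common way to fix one label per HTTP handler while leaving the others variable:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name:    "http_request_duration_seconds",
	Help:    "Request latency, partitioned by handler and method.",
	Buckets: prometheus.DefBuckets,
}, []string{"handler", "method"})

func main() {
	prometheus.MustRegister(requestDuration)

	// Fix the "handler" label once; only "method" remains variable.
	forLogin := requestDuration.MustCurryWith(prometheus.Labels{"handler": "/login"})

	start := time.Now()
	// ... handle the request ...
	forLogin.WithLabelValues("GET").Observe(time.Since(start).Seconds())
}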
+// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } type constHistogram struct { @@ -393,7 +582,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // bucket. // // NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. +// consistent with the variable labels in Desc or if Desc is invalid. func NewConstHistogram( desc *Desc, count uint64, @@ -401,8 +590,11 @@ func NewConstHistogram( buckets map[float64]uint64, labelValues ...string, ) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constHistogram{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index 67ee5ac794..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "bytes" - "compress/gzip" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). 
-// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead -// (which is non instrumented). -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.Handler instead. See there for further documentation. -func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - - contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) - }) -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -func nowSeries(t ...time.Time) nower { - return nowFunc(func() time.Time { - defer func() { - t = t[1:] - }() - - return t[0] - }) -} - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues: -// -// - It uses Summaries rather than Histograms. Summaries are not useful if -// aggregation across multiple instances is required. -// -// - It uses microseconds as unit, which is deprecated and should be replaced by -// seconds. 
-// -// - The size of the request is calculated in a separate goroutine. Since this -// calculator requires access to the request header, it creates a race with -// any writes to the header performed during request handling. -// httputil.ReverseProxy is a prominent example for a handler -// performing such writes. -// -// Upcoming versions of this package will provide ways of instrumenting HTTP -// handlers that are more flexible and have fewer issues. Please prefer direct -// instrumentation in the meantime. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). -// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. 
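For reference, the instrumentation removed in this file is superseded by the middleware in the promhttp sub-package. A hedged sketch of the modern equivalent; the metric names and the particular middleware chaining shown are illustrative, not mandated by this change:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests, partitioned by status code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "HTTP request latency in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
	prometheus.MustRegister(requests, duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Chain the promhttp middleware instead of the removed InstrumentHandler.
	handler := promhttp.InstrumentHandlerCounter(requests,
		promhttp.InstrumentHandlerDuration(duration, hello))

	http.Handle("/", handler)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}

Unlike the removed helpers, these wrappers use Histograms in seconds and let the caller choose the metrics, which addresses the deprecation reasons listed above.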
-func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - - regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) - regReqDur := MustRegisterOrGet(reqDur).(Summary) - regReqSz := MustRegisterOrGet(reqSz).(Summary) - regResSz := MustRegisterOrGet(resSz).(Summary) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := make(chan int) - urlLen := 0 - if r.URL != nil { - urlLen = len(r.URL.String()) - } - go computeApproximateRequestSize(r, out, urlLen) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - regReqCnt.WithLabelValues(method, code).Inc() - regReqDur.Observe(elapsed) - regResSz.Observe(float64(delegate.written)) - regReqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request, out chan int, s int) { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 0000000000..351c26e1ae --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 0000000000..2744443ac2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
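NormalizeMetricFamilies above lives in an internal package, so it can only be exercised from inside client_golang, but its contract (prune empty families, sort the rest by name, sort the metrics within each family) is easy to show. A test-style sketch, assuming it sits next to the code above:

package internal

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func ExampleNormalizeMetricFamilies() {
	byName := map[string]*dto.MetricFamily{
		"b_total": {Name: proto.String("b_total"), Metric: []*dto.Metric{{}}},
		"a_total": {Name: proto.String("a_total")}, // no metrics: pruned
	}
	for _, mf := range NormalizeMetricFamilies(byName) {
		fmt.Println(mf.GetName())
	}
	// Output: b_total
}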
+ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index d4063d98f4..0df1eff881 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -15,11 +15,15 @@ package prometheus import ( "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) -const separatorByte byte = 255 +var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. // A Metric models a single sample value with its meta data being exported to // Prometheus. Implementations of Metric in this package are Gauge, Counter, @@ -43,9 +47,8 @@ type Metric interface { // While populating dto.Metric, it is the responsibility of the // implementation to ensure validity of the Metric protobuf (like valid // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. (Implementers may find - // LabelPairSorter useful for that.) Callers of Write should still make - // sure of sorting if they depend on it. + // recommended to sort labels lexicographically. Callers of Write should + // still make sure of sorting if they depend on it. Write(*dto.Metric) error // TODO(beorn7): The original rationale of passing in a pre-allocated // dto.Metric protobuf to save allocations has disappeared. 
The @@ -57,8 +60,9 @@ type Metric interface { // implementation XXX has its own XXXOpts type, but in most cases, it is just be // an alias of this type (which might change when the requirement arises.) // -// It is mandatory to set Name and Help to a non-empty string. All other fields -// are optional and can safely be left at their zero value. +// It is mandatory to set Name to a non-empty string. All other fields are +// optional and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. type Opts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Metric (created by joining these components with @@ -69,7 +73,7 @@ type Opts struct { Subsystem string Name string - // Help provides information about this metric. Mandatory! + // Help provides information about this metric. // // Metrics with the same fully-qualified name must have the same Help // string. @@ -79,20 +83,12 @@ type Opts struct { // with the same fully-qualified name must have the same label names in // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a metric - // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels - // serve only special purposes. One is for the special case where the - // value of a label does not change during the lifetime of a process, - // e.g. if the revision of the running binary is put into a - // label. Another, more advanced purpose is if more than one Collector - // needs to collect Metrics with the same fully-qualified name. In that - // case, those Metrics must differ in the values of their - // ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels ConstLabels Labels } @@ -118,37 +114,22 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. This is useful for implementing the Write method of -// custom metrics. -type LabelPairSorter []*dto.LabelPair +// labelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. 
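The relaxed Opts contract (Help is now optional but still strongly encouraged) and BuildFQName shown in the surrounding hunks combine as in the following sketch; the namespace and subsystem values are made up for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// BuildFQName joins the non-empty components with underscores.
	fmt.Println(prometheus.BuildFQName("dockerd", "http", "requests_total"))
	// prints: dockerd_http_requests_total

	reqs := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "dockerd",
		Subsystem: "http",
		Name:      "requests_total",
		Help:      "Optional since this release, but still strongly encouraged.",
	})
	prometheus.MustRegister(reqs)
	reqs.Inc()
}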
+type labelPairSorter []*dto.LabelPair -func (s LabelPairSorter) Len() int { +func (s labelPairSorter) Len() int { return len(s) } -func (s LabelPairSorter) Swap(i, j int) { +func (s labelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s LabelPairSorter) Less(i, j int) bool { +func (s labelPairSorter) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() } -type hashSorter []uint64 - -func (s hashSorter) Len() int { - return len(s) -} - -func (s hashSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s hashSorter) Less(i, j int) bool { - return s[i] < s[j] -} - type invalidMetric struct { desc *Desc err error @@ -164,3 +145,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric { func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 0000000000..44128016fd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. 
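NewMetricWithTimestamp above is meant for the mirroring use case its comment describes. A sketch of a hypothetical custom collector doing exactly that; the metric name, value, and timestamp stand in for data read from the mirrored source:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// mirrorCollector mirrors one externally sourced sample together with its
// original timestamp (a hypothetical collector, not part of this diff).
type mirrorCollector struct {
	desc *prometheus.Desc
}

func (c *mirrorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	// Value and timestamp would come from the mirrored source.
	t := time.Unix(1592405640, 0)
	m := prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
	ch <- prometheus.NewMetricWithTimestamp(t, m)
}

func main() {
	prometheus.MustRegister(&mirrorCollector{
		desc: prometheus.NewDesc("mirrored_temperature_celsius", "Hypothetical mirrored sample.", nil, nil),
	})
}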
+// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} + +// ExemplarObserver is implemented by Observers that offer the option of +// observing a value together with an exemplar. Its ObserveWithExemplar method +// works like the Observe method of an Observer but also replaces the currently +// saved exemplar (if any) with a new one, created from the provided value, the +// current time as timestamp, and the provided Labels. Empty Labels will lead to +// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is +// left in place. ObserveWithExemplar panics if any of the provided labels are +// invalid or if the provided labels contain more than 64 runes in total. +type ExemplarObserver interface { + ObserveWithExemplar(value float64, exemplar Labels) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index e31e62e78d..9b80979421 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -13,89 +13,126 @@ package prometheus -import "github.com/prometheus/procfs" +import ( + "errors" + "os" +) type processCollector struct { - pid int collectFn func(chan<- Metric) pidFn func() (int, error) - cpuTotal Counter - openFDs, maxFDs Gauge - vsize, rss Gauge - startTime Gauge + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool } // NewProcessCollector returns a collector which exports the current state of -// process metrics including cpu, memory and file descriptor usage as well as -// the process start time for the given process id under the given namespace. 
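Before the process collector changes, here is the "Gauge" Timer use case that the ObserverFunc comment above refers to, sketched with prometheus.NewTimer from the same package (the Timer type itself is not part of this hunk; the gauge name and the simulated work are illustrative):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var lastBackupDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "last_backup_duration_seconds",
	Help: "Duration of the most recent backup run.",
})

func runBackup() {
	// Gauge as Observer: each run overwrites the previously stored duration.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastBackupDuration.Set))
	defer timer.ObserveDuration()

	time.Sleep(50 * time.Millisecond) // stand-in for the actual work
}

func main() {
	prometheus.MustRegister(lastBackupDuration)
	runBackup()
}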
-func NewProcessCollector(pid int, namespace string) Collector { - return NewProcessCollectorPIDFn( - func() (int, error) { return pid, nil }, - namespace, - ) -} +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } -// NewProcessCollectorPIDFn returns a collector which exports the current state -// of process metrics including cpu, memory and file descriptor usage as well -// as the process start time under the given namespace. The given pidFn is -// called on each collect and is used to determine the process to export -// metrics for. -func NewProcessCollectorPIDFn( - pidFn func() (int, error), - namespace string, -) Collector { - c := processCollector{ - pidFn: pidFn, - collectFn: func(chan<- Metric) {}, + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } - cpuTotal: NewCounter(CounterOpts{ - Namespace: namespace, - Name: "process_cpu_seconds_total", - Help: "Total user and system CPU time spent in seconds.", - }), - openFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_open_fds", - Help: "Number of open file descriptors.", - }), - maxFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_max_fds", - Help: "Maximum number of open file descriptors.", - }), - vsize: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_virtual_memory_bytes", - Help: "Virtual memory size in bytes.", - }), - rss: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_resident_memory_bytes", - Help: "Resident memory size in bytes.", - }), - startTime: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_start_time_seconds", - Help: "Start time of the process since unix epoch in seconds.", - }), + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn } // Set up process metric collection if supported by the runtime. 
- if _, err := procfs.NewStat(); err == nil { + if canCollectProcess() { c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } } - return &c + return c } // Describe returns all descriptions of the collector. func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal.Desc() - ch <- c.openFDs.Desc() - ch <- c.maxFDs.Desc() - ch <- c.vsize.Desc() - ch <- c.rss.Desc() - ch <- c.startTime.Desc() + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime } // Collect returns the current state of all metrics of the collector. @@ -103,40 +140,12 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } -// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the -// client allows users to configure the error behavior. -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { return } - - p, err := procfs.NewProc(pid) - if err != nil { - return - } - - if stat, err := p.NewStat(); err == nil { - c.cpuTotal.Set(stat.CPUTime()) - ch <- c.cpuTotal - c.vsize.Set(float64(stat.VirtualMemory())) - ch <- c.vsize - c.rss.Set(float64(stat.ResidentMemory())) - ch <- c.rss - - if startTime, err := stat.StartTime(); err == nil { - c.startTime.Set(startTime) - ch <- c.startTime - } - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - c.openFDs.Set(float64(fds)) - ch <- c.openFDs - } - - if limits, err := p.NewLimits(); err == nil { - c.maxFDs.Set(float64(limits.OpenFiles)) - ch <- c.maxFDs + if desc == nil { + desc = NewInvalidDesc(err) } + ch <- NewInvalidMetric(desc, err) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 0000000000..3117461cde --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
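From the caller's side, the options-based constructor above replaces both NewProcessCollector(pid, namespace) and NewProcessCollectorPIDFn. A registration sketch, with the namespace prefix and port chosen for illustration:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// The zero value of ProcessCollectorOpts collects for the current PID,
	// with no prefix and errors ignored; the fields below change that.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		Namespace:    "dockerd", // yields dockerd_process_* metric names
		ReportErrors: true,
	}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9115", nil))
}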
+ +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 0000000000..f973398df2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // System interface description + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex + + // Refer to the Golang internal implementation + // https://golang.org/src/internal/syscall/windows/psapi_windows.go + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. +} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 0000000000..5070e72e28 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,370 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. + r.observeWriteHeader(code) + } + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 
+ return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + 
http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 0000000000..5e1c4546ce --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,379 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. 
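Taken together, the handler plumbing added in this file supports setups like the following sketch: a custom registry served through HandlerFor with the HandlerOpts defined further down, wrapped by InstrumentMetricHandler. Registry contents, option values, and the port are illustrative:

package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector()) // illustrative collector choice

	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError,
		Registry:            reg, // also expose promhttp_metric_handler_errors_total
		MaxRequestsInFlight: 3,
		Timeout:             10 * time.Second,
		EnableOpenMetrics:   true, // negotiate OpenMetrics when the scraper asks for it
	})
	http.Handle("/metrics", promhttp.InstrumentMetricHandler(reg, h))
	log.Fatal(http.ListenAndServe(":9115", nil))
}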
+package promhttp + +import ( + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilites that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + errCnt.WithLabelValues("gathering").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + // Still report the error if no metrics have been gathered. 
+ httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true + } + // Do nothing in all other cases, including ContinueOnError. + return false + } + + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } + } + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. + if handleError(closer.Close()) { + return + } + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. 
+ cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. Note that HTTP + // errors cannot be served anymore once the beginning of a regular + // payload has been sent. Thus, in the (unlikely) case that encoding the + // payload into the negotiated wire format fails, serving the response + // will simply be aborted. Set an ErrorLog in HandlerOpts to detect + // those errors. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. 
(2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration + // If true, the experimental OpenMetrics encoding is added to the + // possible options during content negotiation. Note that Prometheus + // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is + // the only way to transmit exemplars. However, the move to OpenMetrics + // is not completely transparent. Most notably, the values of "quantile" + // labels of Summaries and "le" labels of Histograms are formatted with + // a trailing ".0" if they would otherwise look like integer numbers + // (which changes the identity of the resulting series on the Prometheus + // server). + EnableOpenMetrics bool +} + +// gzipAccepted returns whether the client will accept gzip-encoded content. +func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents is +// supposed to be uncompressed plain text. Same as with a plain http.Error, this +// must not be called if the header or any payload has already been sent. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 0000000000..83c49b66a8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
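A minimal sketch of how the pieces above are typically wired together — HandlerFor serving an application-owned registry, tuned via HandlerOpts, and wrapped by InstrumentMetricHandler. Registry name, port, and logger are illustrative, not part of the vendored code.

```go
package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Application-owned registry instead of the global default.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	// HandlerFor serves the registry's metrics; HandlerOpts controls error
	// handling, compression, timeouts, and content negotiation.
	metricsHandler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError,
		Registry:            reg, // exposes promhttp_metric_handler_errors_total
		MaxRequestsInFlight: 3,
		Timeout:             10 * time.Second,
		EnableOpenMetrics:   true,
	})

	// InstrumentMetricHandler adds the scrape counter and in-flight gauge
	// for the metrics endpoint itself.
	http.Handle("/metrics", promhttp.InstrumentMetricHandler(reg, metricsHandler))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```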
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. For those, the only allowed label names are "code" and +// "method". The function panics otherwise. The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. 
+// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
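The client-side middlewares compose by wrapping an http.RoundTripper from the inside out. A minimal sketch, assuming the metric names and target URL are placeholders:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	inFlight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outgoing requests currently in flight.",
	})
	requests = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_requests_total",
			Help: "Outgoing requests by status code and method.",
		},
		[]string{"code", "method"},
	)
	latency = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "client_request_duration_seconds",
			Help:    "Outgoing request latency in seconds.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
)

func main() {
	prometheus.MustRegister(inFlight, requests, latency)

	// Wrap the default transport; each layer observes and then delegates.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(requests,
				promhttp.InstrumentRoundTripperDuration(latency, http.DefaultTransport),
			),
		),
	}
	client.Get("http://example.com") // placeholder target
}
```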
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 0000000000..9db2438053 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. 
It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. 
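The server-side helpers chain the same way around an http.Handler. A sketch with illustrative metric names and an illustrative handler:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "http_in_flight_requests",
		Help: "Requests currently being served.",
	})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Requests by status code and method.",
		},
		[]string{"code", "method"},
	)
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request latency in seconds.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(inFlight, counter, duration)

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// In-flight gauge outermost, then duration, then the request counter.
	chain := promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerDuration(duration,
			promhttp.InstrumentHandlerCounter(counter, handler),
		),
	)
	http.Handle("/", chain)
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
```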
The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. 
Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. 
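Only "code" and "method" may remain as variable labels on the vecs handed to these middlewares; any additional dimension has to be curried (or made a const label) first, otherwise the label check panics. A sketch of the currying pattern, with illustrative names:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "api_requests_total",
			Help: "Requests by handler, status code, and method.",
		},
		[]string{"handler", "code", "method"},
	)
	prometheus.MustRegister(reqs)

	okHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Curry away the per-handler dimension; the remaining variable labels
	// ("code", "method") are the ones the middleware knows how to fill.
	http.Handle("/items", promhttp.InstrumentHandlerCounter(
		reqs.MustCurryWith(prometheus.Labels{"handler": "items"}), okHandler,
	))
	http.ListenAndServe(":8080", nil)
}
```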
+var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 32a3986b06..c05d6ee1b3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,15 +15,23 @@ package prometheus import ( "bytes" - "errors" "fmt" + "io/ioutil" "os" + "path/filepath" + "runtime" "sort" + "strings" "sync" + "unicode/utf8" + "github.com/cespare/xxhash/v2" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" ) const ( @@ -35,13 +43,14 @@ const ( // DefaultRegisterer and DefaultGatherer are the implementations of the // Registerer and Gatherer 
interface a number of convenience functions in this // package act on. Initially, both variables point to the same Registry, which -// has a process collector (see NewProcessCollector) and a Go collector (see -// NewGoCollector) already registered. This approach to keep default instances -// as global state mirrors the approach of other packages in the Go standard -// library. Note that there are caveats. Change the variables with caution and -// only if you understand the consequences. Users who want to avoid global state -// altogether should not use the convenience function and act on custom -// instances instead. +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. var ( defaultRegistry = NewRegistry() DefaultRegisterer Registerer = defaultRegistry @@ -49,7 +58,7 @@ var ( ) func init() { - MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) MustRegister(NewGoCollector()) } @@ -65,7 +74,8 @@ func NewRegistry() *Registry { // NewPedanticRegistry returns a registry that checks during collection if each // collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. +// actually been registered with the registry. Unchecked Collectors (those whose +// Describe method does not yield any descriptors) are excluded from the check. // // Usually, a Registry will be happy as long as the union of all collected // Metrics is consistent and valid even if some metrics are not consistent with @@ -80,7 +90,7 @@ func NewPedanticRegistry() *Registry { // Registerer is the interface for the part of a registry in charge of // registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather then the Registry type +// Registerer as type for registration purposes (rather than the Registry type // directly). In that way, they are free to use custom Registerer implementation // (e.g. for testing purposes). type Registerer interface { @@ -95,8 +105,13 @@ type Registerer interface { // returned error is an instance of AlreadyRegisteredError, which // contains the previously registered Collector. // - // It is in general not safe to register the same Collector multiple - // times concurrently. + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) 
Register(Collector) error // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an @@ -105,7 +120,9 @@ type Registerer interface { // Unregister unregisters the Collector that equals the Collector passed // in as an argument. (Two Collectors are considered equal if their // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). // // Note that even after unregistering, it will not be possible to // register a new Collector that is inconsistent with the unregistered @@ -123,15 +140,23 @@ type Registerer interface { type Gatherer interface { // Gather calls the Collect method of the registered Collectors and then // gathers the collected metrics into a lexicographically sorted slice - // of MetricFamily protobufs. Even if an error occurs, Gather attempts - // to gather as many metrics as possible. Hence, if a non-nil error is - // returned, the returned MetricFamily slice could be nil (in case of a - // fatal error that prevented any meaningful metric collection) or - // contain a number of MetricFamily protobufs, some of which might be - // incomplete, and some might be missing altogether. The returned error - // (which might be a MultiError) explains the details. In scenarios - // where complete collection is critical, the returned MetricFamily - // protobufs should be disregarded if the returned error is non-nil. + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. Gather() ([]*dto.MetricFamily, error) } @@ -152,38 +177,6 @@ func MustRegister(cs ...Collector) { DefaultRegisterer.MustRegister(cs...) } -// RegisterOrGet registers the provided Collector with the DefaultRegisterer and -// returns the Collector, unless an equal Collector was registered before, in -// which case that Collector is returned. -// -// Deprecated: RegisterOrGet is merely a convenience function for the -// implementation as described in the documentation for -// AlreadyRegisteredError. As the use case is relatively rare, this function -// will be removed in a future version of this package to clean up the -// namespace. 
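With the deprecated RegisterOrGet helpers removed, the register-or-reuse pattern is expressed directly via AlreadyRegisteredError. A sketch of that idiom (package and function names are illustrative):

```go
package metricsutil // illustrative package name

import "github.com/prometheus/client_golang/prometheus"

// registerOrReuse tries to register c and, if an equal Collector is already
// registered, returns that existing Collector instead.
func registerOrReuse(reg prometheus.Registerer, c prometheus.Collector) (prometheus.Collector, error) {
	if err := reg.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector, nil
		}
		return nil, err
	}
	return c, nil
}
```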
-func RegisterOrGet(c Collector) (Collector, error) { - if err := Register(c); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - return are.ExistingCollector, nil - } - return nil, err - } - return c, nil -} - -// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning -// an error. -// -// Deprecated: This is deprecated for the same reason RegisterOrGet is. See -// there for details. -func MustRegisterOrGet(c Collector) Collector { - c, err := RegisterOrGet(c) - if err != nil { - panic(err) - } - return c -} - // Unregister removes the registration of the provided Collector from the // DefaultRegisterer. // @@ -201,25 +194,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { return gf() } -// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that -// gathers from the previous DefaultGatherers but then merges the MetricFamily -// protobufs returned from the provided hook function with the MetricFamily -// protobufs returned from the original DefaultGatherer. -// -// Deprecated: This function manipulates the DefaultGatherer variable. Consider -// the implications, i.e. don't do this concurrently with any uses of the -// DefaultGatherer. In the rare cases where you need to inject MetricFamily -// protobufs directly, it is recommended to use a custom Registry and combine it -// with a custom Gatherer using the Gatherers type (see -// there). SetMetricFamilyInjectionHook only exists for compatibility reasons -// with previous versions of this package. -func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - DefaultGatherer = Gatherers{ - DefaultGatherer, - GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), - } -} - // AlreadyRegisteredError is returned by the Register method if the Collector to // be registered has already been registered before, or a different Collector // that collects the same metrics has been registered before. Registration fails @@ -252,6 +226,13 @@ func (errs MultiError) Error() string { return buf.String() } +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // contained error as error if len(errs is 1). In all other cases, it returns // the MultiError directly. This is helpful for returning a MultiError in a way @@ -276,6 +257,7 @@ type Registry struct { collectorsByID map[uint64]Collector // ID is a hash of the descIDs. descIDs map[uint64]struct{} dimHashesByName map[string]uint64 + uncheckedCollectors []Collector pedanticChecksEnabled bool } @@ -285,7 +267,7 @@ func (r *Registry) Register(c Collector) error { descChan = make(chan *Desc, capDescChan) newDescIDs = map[uint64]struct{}{} newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. + collectorID uint64 // All desc IDs XOR'd together. duplicateDescErr error ) go func() { @@ -293,8 +275,13 @@ func (r *Registry) Register(c Collector) error { close(descChan) }() r.mtx.Lock() - defer r.mtx.Unlock() - // Coduct various tests... + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() + // Conduct various tests... for desc := range descChan { // Is the descriptor valid at all? 
@@ -307,12 +294,12 @@ func (r *Registry) Register(c Collector) error { if _, exists := r.descIDs[desc.id]; exists { duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) } - // If it is not a duplicate desc in this collector, add it to + // If it is not a duplicate desc in this collector, XOR it to // the collectorID. (We allow duplicate descs within the same // collector, but their existence must be a no-op.) if _, exists := newDescIDs[desc.id]; !exists { newDescIDs[desc.id] = struct{}{} - collectorID += desc.id + collectorID ^= desc.id } // Are all the label names and the help string consistent with @@ -333,14 +320,23 @@ func (r *Registry) Register(c Collector) error { } } } - // Did anything happen at all? + // A Collector yielding no Desc at all is considered unchecked. if len(newDescIDs) == 0 { - return errors.New("collector has no descriptors") + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil } if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } } } // If the collectorID is new, but at least one of the descs existed @@ -365,7 +361,7 @@ func (r *Registry) Unregister(c Collector) bool { var ( descChan = make(chan *Desc, capDescChan) descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. + collectorID uint64 // All desc IDs XOR'd together. ) go func() { c.Describe(descChan) @@ -373,7 +369,7 @@ func (r *Registry) Unregister(c Collector) bool { }() for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id + collectorID ^= desc.id descIDs[desc.id] = struct{}{} } } @@ -409,31 +405,25 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { var ( - metricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) 
- wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector } - // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. if r.pedanticChecksEnabled { @@ -442,133 +432,264 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs[id] = struct{}{} } } - r.mtx.RUnlock() - // Drain metricChan in case of premature return. + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-checkedCollectors: + collector.Collect(checkedMetricChan) + case collector := <-uncheckedCollectors: + collector.Collect(uncheckedMetricChan) + default: + return + } + wg.Done() + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close checkedMetricChan and uncheckedMetricChan once all collectors + // are collected. + go func() { + wg.Wait() + close(checkedMetricChan) + close(uncheckedMetricChan) + }() + + // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. defer func() { - for _ = range metricChan { + if checkedMetricChan != nil { + for range checkedMetricChan { + } + } + if uncheckedMetricChan != nil { + for range uncheckedMetricChan { + } } }() - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - errs = append(errs, fmt.Errorf( - "error collecting metric %v: %s", desc, err, + // Copy the channel references so we can nil them out later to remove + // them from the select statements below. + cmc := checkedMetricChan + umc := uncheckedMetricChan + + for { + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, )) - continue + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + default: + if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. Do the same as above, + // just without the default. + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, + )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + } + break + } + // Start more workers. 
+ go collectWorker() + goroutineBudget-- + runtime.Gosched() } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { - if metricFamily.GetHelp() != desc.help { - errs = append(errs, fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - )) - continue - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - )) - continue - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - errs = append(errs, fmt.Errorf( - "empty metric collected: %s", dtoMetric, - )) - continue - } - metricFamiliesByName[desc.fqName] = metricFamily + // Once both checkedMetricChan and uncheckdMetricChan are closed + // and drained, the contraption above will nil out cmc and umc, + // and then we can leave the collect loop here. + if cmc == nil && umc == nil { + break } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue - } - if r.pedanticChecksEnabled { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - errs = append(errs, fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - )) - continue - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - errs = append(errs, err) - continue - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. +// +// This is intended for use with the textfile collector of the node exporter. 
+// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
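WriteToTextfile pairs with the node exporter's textfile collector, e.g. for batch jobs that cannot be scraped directly. A sketch with an illustrative metric and output path:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	lastSuccess := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_job_last_success_timestamp_seconds",
		Help: "Unix time of the last successful batch run.",
	})
	reg.MustRegister(lastSuccess)
	lastSuccess.SetToCurrentTime()

	// Gathers from reg, writes the text format to a temp file, and renames it
	// into place; the ".prom" suffix is what the textfile collector expects.
	if err := prometheus.WriteToTextfile("/var/lib/node_exporter/textfile/batch_job.prom", reg); err != nil {
		log.Fatal(err)
	}
}
```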
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil } // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and +// Gather calls are all returned in a flattened MultiError. Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. // @@ -588,7 +709,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { var ( metricFamiliesByName = map[string]*dto.MetricFamily{} metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} errs MultiError // The collected errors to return in the end. ) @@ -625,10 +745,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { existingMF.Name = mf.Name existingMF.Help = mf.Help existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } metricFamiliesByName[mf.GetName()] = existingMF } for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { errs = append(errs, err) continue } @@ -636,88 +760,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { } } } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
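Gatherers merges several registries into a single exposition, with duplicates skipped (first occurrence wins) and reported as errors. A sketch combining a custom registry with the default gatherer:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	custom := prometheus.NewRegistry()
	// ... register application collectors on custom ...

	// Gather from the custom registry first, then from the default one.
	merged := prometheus.Gatherers{custom, prometheus.DefaultGatherer}

	http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}
```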
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } } } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// normalizeMetricFamilies returns a MetricFamily slice whith empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) } } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } } - return result + return nil } // checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashed the Metric labels and the MetricFamily -// name. If the resulting hash is alread in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. The provided dimHashes maps -// MetricFamily names to their dimHash (hashed sorted label names). 
If dimHashes -// doesn't yet contain a hash for the provided MetricFamily, it is -// added. Otherwise, an error is returned if the existing dimHashes in not equal -// the calculated dimHash. +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. func checkMetricConsistency( metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, metricHashes map[uint64]struct{}, - dimHashes map[string]uint64, ) error { + name := metricFamily.GetName() + // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || @@ -725,42 +841,67 @@ func checkMetricConsistency( metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), ) } - // Is the metric unique (i.e. no other metric with the same name and the same label values)? - h := hashNew() - h = hashAdd(h, metricFamily.GetName()) - h = hashAddByte(h, separatorByte) - dh := hashNew() - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - sort.Sort(LabelPairSorter(dtoMetric.Label)) - for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - dh = hashAdd(dh, lp.GetName()) - dh = hashAddByte(dh, separatorByte) - } - if _, exists := metricHashes[h]; exists { - return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, - ) - } - if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { - if dimHash != dh { + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { return fmt.Errorf( - "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", - metricFamily.GetName(), dtoMetric, + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, ) } - } else { - dimHashes[metricFamily.GetName()] = dh + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName } - metricHashes[h] = struct{}{} + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := xxhash.New() + h.WriteString(name) + h.Write(separatorByteSlice) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. 
+ copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } + for _, lp := range dtoMetric.Label { + h.WriteString(lp.GetName()) + h.Write(separatorByteSlice) + h.WriteString(lp.GetValue()) + h.Write(separatorByteSlice) + } + hSum := h.Sum64() + if _, exists := metricHashes[hSum]; exists { + return fmt.Errorf( + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, + ) + } + metricHashes[hSum] = struct{}{} return nil } @@ -778,8 +919,8 @@ func checkDescConsistency( } // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), @@ -791,7 +932,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(LabelPairSorter(lpsFromDesc)) + sort.Sort(labelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index bce05bf9a0..ae42e761a1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -16,8 +16,10 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" "sync" + "sync/atomic" "time" "github.com/beorn7/perks/quantile" @@ -36,7 +38,10 @@ const quantileLabel = "quantile" // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. // // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you @@ -53,13 +58,8 @@ type Summary interface { Observe(float64) } -// DefObjectives are the default Summary quantile values. -var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, ) // Default values for SummaryOpts. @@ -75,8 +75,10 @@ const ( ) // SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v1.0.0 of the library. 
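Because the vendored summary no longer falls back to DefObjectives, callers that want quantiles have to request them explicitly; with Objectives left unset, only the _sum and _count series remain. A minimal stand-alone sketch (illustrative only, invented metric names, not part of the patch):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // Keeps the classic quantiles by setting Objectives explicitly.
        latency := prometheus.NewSummary(prometheus.SummaryOpts{
            Name:       "http_request_duration_seconds",
            Help:       "Request latency.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        })

        // With Objectives unset, the summary exposes only _sum and _count and is
        // served by the lock-free noObjectivesSummary added further down.
        jobDuration := prometheus.NewSummary(prometheus.SummaryOpts{
            Name: "background_job_duration_seconds",
            Help: "Job duration.",
        })

        prometheus.MustRegister(latency, jobDuration)
        latency.Observe(0.23)
        jobDuration.Observe(1.5)
    }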
type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -87,35 +89,34 @@ type SummaryOpts struct { Subsystem string Name string - // Help provides information about this Summary. Mandatory! + // Help provides information about this Summary. // // Metrics with the same fully-qualified name must have the same Help // string. Help string - // ConstLabels are used to attach fixed labels to this - // Summary. Summaries with the same fully-qualified name must have the - // same label names in their ConstLabels. + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // SummaryVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Summaries with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels ConstLabels Labels // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported - // for q will be the φ-quantile value for some φ between q-e and q+e. - // The default value is DefObjectives. + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is an empty map, resulting in a summary without + // quantiles. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -139,7 +140,7 @@ type SummaryOpts struct { BufCap uint32 } -// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. 
To avoid using Merge, we are currently adding @@ -169,7 +170,7 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { @@ -183,8 +184,8 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } } - if len(opts.Objectives) == 0 { - opts.Objectives = DefObjectives + if opts.Objectives == nil { + opts.Objectives = map[float64]float64{} } if opts.MaxAge < 0 { @@ -202,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{{}, {}}, + } + s.init(s) // Init self-collection. + return s + } + s := &summary{ desc: desc, @@ -370,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) { } } +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. 
+ n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + type quantSort []*dto.Quantile func (s quantSort) Len() int { @@ -390,13 +512,21 @@ func (s quantSort) Less(i, j int) bool { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. type SummaryVec struct { - *MetricVec + *metricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, @@ -404,47 +534,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { opts.ConstLabels, ) return &SummaryVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + metricVec: newMetricVec(desc, func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. 
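The countAndHotIdx scheme described in the comments above is easier to follow in isolation. The following stand-alone sketch (illustrative only; pair, slots and state are invented names, this is not the vendored implementation) shows the same split of one uint64 into a top "hot slot" bit and a lower 63-bit observation counter:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // pair stands in for the real summaryCounts struct.
    type pair struct{ count uint64 }

    func main() {
        var state uint64 // top bit: hot slot index; lower 63 bits: started observations
        slots := [2]*pair{{}, {}}

        // Observe path: bump the started counter, then update whichever slot is hot.
        n := atomic.AddUint64(&state, 1)
        atomic.AddUint64(&slots[n>>63].count, 1)

        // Write path: flipping the top bit swaps hot and cold without touching the
        // counter; the old hot slot may be read once its completions catch up.
        n = atomic.AddUint64(&state, 1<<63)
        started := n & (1<<63 - 1)
        cold := slots[(^n)>>63]
        for started != atomic.LoadUint64(&cold.count) {
            // The real code calls runtime.Gosched() here; nothing to wait for in
            // this single-goroutine sketch.
        }
        fmt.Println("cold slot holds", atomic.LoadUint64(&cold.count), "completed observation(s)")
    }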
The difference is that this method returns a Summary and not a -// Metric so that no type conversion is required. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Summary and not a Metric so that no -// type conversion is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. 
Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { - return m.MetricVec.WithLabelValues(lvs...).(Summary) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Summary { - return m.MetricVec.With(labels).(Summary) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } type constSummary struct { @@ -497,7 +696,7 @@ func (s *constSummary) Write(out *dto.Metric) error { // map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. +// consistent with the variable labels in Desc or if Desc is invalid. 
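Seen from calling code, the reworked SummaryVec now hands out Observer values and supports currying. A short stand-alone sketch (illustrative only; the metric and label names are invented, and the client_golang v1.x API from this bump is assumed):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        latency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
            Name:       "rpc_duration_seconds",
            Help:       "RPC latency.",
            Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
        }, []string{"service", "method"})
        prometheus.MustRegister(latency)

        // Pre-set the service label once; the curried vector only needs "method".
        perService := latency.MustCurryWith(prometheus.Labels{"service": "billing"})
        perService.WithLabelValues("Charge").Observe(0.042)

        // The uncurried vector still addresses the same underlying child metric.
        latency.WithLabelValues("billing", "Charge").Observe(0.038)
    }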
func NewConstSummary( desc *Desc, count uint64, @@ -505,8 +704,11 @@ func NewConstSummary( quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constSummary{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 0000000000..8d5f105233 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go index 5faf7e6e3e..0f9ce63f40 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -13,108 +13,12 @@ package prometheus -// Untyped is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// An Untyped metric works the same as a Gauge. The only difference is that to -// no type information is implied. -// -// To create Untyped instances, use NewUntyped. -type Untyped interface { - Metric - Collector - - // Set sets the Untyped metric to an arbitrary value. - Set(float64) - // Inc increments the Untyped metric by 1. - Inc() - // Dec decrements the Untyped metric by 1. - Dec() - // Add adds the given value to the Untyped metric. (The value can be - // negative, resulting in a decrease.) 
- Add(float64) - // Sub subtracts the given value from the Untyped metric. (The value can - // be negative, resulting in an increase.) - Sub(float64) -} - // UntypedOpts is an alias for Opts. See there for doc comments. type UntypedOpts Opts -// NewUntyped creates a new Untyped metric from the provided UntypedOpts. -func NewUntyped(opts UntypedOpts) Untyped { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, 0) -} - -// UntypedVec is a Collector that bundles a set of Untyped metrics that all -// share the same Desc, but have different values for their variable -// labels. This is used if you want to count the same thing partitioned by -// various dimensions. Create instances with NewUntypedVec. -type UntypedVec struct { - *MetricVec -} - -// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &UntypedVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Untyped and not a -// Metric so that no type conversion is required. -func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Untyped and not a Metric so that no -// type conversion is required. -func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { - return m.MetricVec.WithLabelValues(lvs...).(Untyped) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *UntypedVec) With(labels Labels) Untyped { - return m.MetricVec.With(labels).(Untyped) -} - -// UntypedFunc is an Untyped whose value is determined at collect time by -// calling a provided function. +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. // // To create UntypedFunc instances, use NewUntypedFunc. 
type UntypedFunc interface { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index a944c37754..2be470ce15 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,21 +14,22 @@ package prometheus import ( - "errors" "fmt" - "math" "sort" - "sync/atomic" - - dto "github.com/prometheus/client_model/go" + "time" + "unicode/utf8" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + + dto "github.com/prometheus/client_model/go" ) // ValueType is an enumeration of metric types that represent a simple value. type ValueType int -// Possible values for the ValueType enum. +// Possible values for the ValueType enum. Use UntypedValue to mark a metric +// with an unknown type. const ( _ ValueType = iota CounterValue @@ -36,77 +37,6 @@ const ( UntypedValue ) -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -// value is a generic metric for simple values. It implements Metric, Collector, -// Counter, Gauge, and Untyped. Its effective type is determined by -// ValueType. This is a low-level building block used by the library to back the -// implementations of Counter, Gauge, and Untyped. -type value struct { - // valBits containst the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - valType ValueType - labelPairs []*dto.LabelPair -} - -// newValue returns a newly allocated value with the given Desc, ValueType, -// sample value and label values. It panics if the number of label -// values is different from the number of variable labels in Desc. -func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { - if len(labelValues) != len(desc.variableLabels) { - panic(errInconsistentCardinality) - } - result := &value{ - desc: desc, - valType: valueType, - valBits: math.Float64bits(val), - labelPairs: makeLabelPairs(desc, labelValues), - } - result.init(result) - return result -} - -func (v *value) Desc() *Desc { - return v.desc -} - -func (v *value) Set(val float64) { - atomic.StoreUint64(&v.valBits, math.Float64bits(val)) -} - -func (v *value) Inc() { - v.Add(1) -} - -func (v *value) Dec() { - v.Add(-1) -} - -func (v *value) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&v.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { - return - } - } -} - -func (v *value) Sub(val float64) { - v.Add(val * -1) -} - -func (v *value) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) - return populateMetric(v.valType, val, v.labelPairs, out) -} - // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. 
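With Untyped and UntypedVec removed, only UntypedFunc is left for mirroring values of unknown type. A small stand-alone sketch (illustrative only; the metric name and the externalDepth closure are invented):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // externalDepth stands in for a value read from some other system.
        externalDepth := func() float64 { return 42 }

        queueDepth := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
            Name: "external_queue_depth",
            Help: "Depth of an upstream queue whose type we do not know.",
        }, externalDepth)
        prometheus.MustRegister(queueDepth)
    }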
This is a low-level building block used by the @@ -143,7 +73,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -151,10 +81,14 @@ func (v *valueFunc) Write(out *dto.Metric) error { // operations. However, when implementing custom Collectors, it is useful as a // throw-away metric that is generated on the fly to send it to Prometheus in // the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc. +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constMetric{ desc: desc, @@ -186,19 +120,20 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) + return populateMetric(m.valType, m.val, m.labelPairs, nil, out) } func populateMetric( t ValueType, v float64, labelPairs []*dto.LabelPair, + e *dto.Exemplar, m *dto.Metric, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -226,9 +161,44 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { Value: proto.String(labelValues[i]), }) } - for _, lp := range desc.constLabelPairs { - labelPairs = append(labelPairs, lp) - } - sort.Sort(LabelPairSorter(labelPairs)) + labelPairs = append(labelPairs, desc.constLabelPairs...) + sort.Sort(labelPairSorter(labelPairs)) return labelPairs } + +// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. +const ExemplarMaxRunes = 64 + +// newExemplar creates a new dto.Exemplar from the provided values. An error is +// returned if any of the label names or values are invalid or if the total +// number of runes in the label names and values exceeds ExemplarMaxRunes. 
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { + e := &dto.Exemplar{} + e.Value = proto.Float64(value) + tsProto, err := ptypes.TimestampProto(ts) + if err != nil { + return nil, err + } + e.Timestamp = tsProto + labelPairs := make([]*dto.LabelPair, 0, len(l)) + var runes int + for name, value := range l { + if !checkLabelName(name) { + return nil, fmt.Errorf("exemplar label name %q is invalid", name) + } + runes += utf8.RuneCountInString(name) + if !utf8.ValidString(value) { + return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) + } + runes += utf8.RuneCountInString(value) + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(name), + Value: proto.String(value), + }) + } + if runes > ExemplarMaxRunes { + return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) + } + e.Label = labelPairs + return e, nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 7f3eef9a46..d53848dc48 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,33 +20,192 @@ import ( "github.com/prometheus/common/model" ) -// MetricVec is a Collector to bundle metrics of the same name that -// differ in their label values. MetricVec is usually not used directly but as a -// building block for implementations of vectors of a given metric -// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already -// provided in this package. -type MetricVec struct { - mtx sync.RWMutex // Protects the children. - children map[uint64][]metricWithLabelValues - desc *Desc +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. +type metricVec struct { + *metricMap - newMetric func(labelValues ...string) Metric - hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 hashAddByte func(h uint64, b byte) uint64 } -// newMetricVec returns an initialized MetricVec. The concrete value is -// returned for embedding into another struct. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - children: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, hashAdd: hashAdd, hashAddByte: hashAddByte, } } +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. 
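On the caller's side, exemplars built by newExemplar are usually attached through the ExemplarAdder/ExemplarObserver interfaces that client_golang gained alongside this support. A hedged stand-alone sketch (illustrative only; the metric and label names are invented, and the interface assertion relies on the concrete counter type implementing ExemplarAdder, which is an assumption about the vendored version rather than something shown in this hunk):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        requests := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "Handled requests.",
        })
        prometheus.MustRegister(requests)

        // Exemplar label names and values must be valid, and their combined length
        // must stay within ExemplarMaxRunes (64 runes), or newExemplar rejects them.
        if adder, ok := requests.(prometheus.ExemplarAdder); ok {
            adder.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
        }
    }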
However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +// Without explicit forwarding of Describe, Collect, Reset, those methods won't +// show up in GoDoc. + +// Describe implements Collector. +func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } + +// Collect implements Collector. +func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } + +// Reset deletes all metrics in this vector. +func (m *metricVec) Reset() { m.metricMap.Reset() } + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. 
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + // metricWithLabelValues provides the metric and its label values for // disambiguation on hash collision. type metricWithLabelValues struct { @@ -54,166 +213,72 @@ type metricWithLabelValues struct { metric Metric } -// Describe implements Collector. The length of the returned slice -// is always one. -func (m *MetricVec) Describe(ch chan<- *Desc) { +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { ch <- m.desc } // Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { +func (m *metricMap) Collect(ch chan<- Metric) { m.mtx.RLock() defer m.mtx.RUnlock() - for _, metrics := range m.children { + for _, metrics := range m.metrics { for _, metric := range metrics { ch <- metric.metric } } } -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created. 
-// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it at its start value (e.g. a Summary or -// Histogram without any observations). See also the SummaryVec example. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabelValues(h, lvs), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabels(h, labels), nil -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics if an error -// occurs. The method allows neat syntax like: -// httpReqs.WithLabelValues("404", "POST").Inc() -func (m *MetricVec) WithLabelValues(lvs ...string) Metric { - metric, err := m.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return metric -} - -// With works as GetMetricWith, but panics if an error occurs. The method allows -// neat syntax like: -// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() -func (m *MetricVec) With(labels Labels) Metric { - metric, err := m.GetMetricWith(labels) - if err != nil { - panic(err) - } - return metric -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual Metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. 
Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { m.mtx.Lock() defer m.mtx.Unlock() - h, err := m.hashLabelValues(lvs) - if err != nil { - return false + for h := range m.metrics { + delete(m.metrics, h) } - return m.deleteByHashWithLabelValues(h, lvs) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in the Desc of the MetricVec. However, such -// inconsistent Labels can never match an actual Metric, so the method will -// always return false in that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.deleteByHashWithLabels(h, labels) } // deleteByHashWithLabelValues removes the metric from the hash bucket h. If // there are multiple matches in the bucket, use lvs to select a metric and // remove only that metric. -func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { - metrics, ok := m.children[h] +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] if !ok { return false } - i := m.findMetricWithLabelValues(metrics, lvs) + i := findMetricWithLabelValues(metrics, lvs, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.children, h) + delete(m.metrics, h) } return true } @@ -221,69 +286,38 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { // deleteByHashWithLabels removes the metric from the hash bucket h. If there // are multiple matches in the bucket, use lvs to select a metric and remove // only that metric. -func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { - metrics, ok := m.children[h] +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] if !ok { return false } - i := m.findMetricWithLabels(metrics, labels) + i := findMetricWithLabels(m.desc, metrics, labels, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.children, h) + delete(m.metrics, h) } return true } -// Reset deletes all metrics in this vector. 
-func (m *MetricVec) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.children { - delete(m.children, h) - } -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if len(vals) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, val := range vals { - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if len(labels) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, label := range m.desc.variableLabels { - val, ok := labels[label] - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithLabelValues(hash, lvs) + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) m.mtx.RUnlock() if ok { return metric @@ -291,13 +325,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabelValues(hash, lvs) + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) if !ok { - // Copy to avoid allocation in case wo don't go down this code path. - copiedLVs := make([]string, len(lvs)) - copy(copiedLVs, lvs) - metric = m.newMetric(copiedLVs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) } return metric } @@ -306,9 +338,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) // or creates it and returns the new one. // // This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithLabels(hash, labels) + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) m.mtx.RUnlock() if ok { return metric @@ -316,33 +350,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabels(hash, labels) + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) if !ok { - lvs := m.extractLabelValues(labels) + lvs := extractLabelValues(m.desc, labels, curry) metric = m.newMetric(lvs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) } return metric } -// getMetricWithLabelValues gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. 
-func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { - metrics, ok := m.children[h] +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] if ok { - if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { return metrics[i].metric, true } } return nil, false } -// getMetricWithLabels gets a metric while handling possible collisions in +// getMetricWithHashAndLabels gets a metric while handling possible collisions in // the hash space. Must be called while holding read mutex. -func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { - metrics, ok := m.children[h] +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] if ok { - if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { return metrics[i].metric, true } } @@ -351,9 +389,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) // findMetricWithLabelValues returns the index of the matching metric or // len(metrics) if not found. -func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { for i, metric := range metrics { - if m.matchLabelValues(metric.values, lvs) { + if matchLabelValues(metric.values, lvs, curry) { return i } } @@ -362,32 +402,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l // findMetricWithLabels returns the index of the matching metric or len(metrics) // if not found. 
-func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { for i, metric := range metrics { - if m.matchLabels(metric.values, labels) { + if matchLabels(desc, metric.values, labels, curry) { return i } } return len(metrics) } -func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { - if len(values) != len(lvs) { +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { return false } + var iLVs, iCurry int for i, v := range values { - if v != lvs[i] { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { return false } + iLVs++ } return true } -func (m *MetricVec) matchLabels(values []string, labels Labels) bool { - if len(labels) != len(values) { +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { return false } - for i, k := range m.desc.variableLabels { + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } if values[i] != labels[k] { return false } @@ -395,10 +454,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool { return true } -func (m *MetricVec) extractLabelValues(labels Labels) []string { - labelValues := make([]string, len(labels)) - for i, k := range m.desc.variableLabels { +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } labelValues[i] = labels[k] } return labelValues } + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 0000000000..e303eef6d3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,200 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m *wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. + if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md index a710042db5..e100975827 100644 --- a/vendor/github.com/prometheus/client_model/README.md +++ b/vendor/github.com/prometheus/client_model/README.md @@ -1,26 +1,34 @@ -# Background -Under most circumstances, manually downloading this repository should never -be required. 
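A short usage sketch of the two wrappers defined above; the registry, label, and prefix values are illustrative, not taken from this repository:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Everything registered through `wrapped` gets a constant
	// subsystem="cache" label and an "example_" name prefix.
	wrapped := prometheus.WrapRegistererWithPrefix("example_",
		prometheus.WrapRegistererWith(prometheus.Labels{"subsystem": "cache"}, reg),
	)

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "hits_total",
		Help: "Cache hits.",
	})
	wrapped.MustRegister(hits)
	hits.Inc()

	// Gathering from the underlying registry yields the modified family,
	// here "example_hits_total" carrying the added const label.
	if _, err := reg.Gather(); err != nil {
		panic(err)
	}
}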
+# Deprecation note -# Prerequisites -# Base -* [Google Protocol Buffers](https://developers.google.com/protocol-buffers) +This repository used to contain the [protocol +buffer](https://developers.google.com/protocol-buffers) code that defined both +the data model and the exposition format of Prometheus metrics. -## Java -* [Apache Maven](http://maven.apache.org) -* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository +Starting with v2.0.0, the [Prometheus +server](https://github.com/prometheus/prometheus) does not ingest the +protobuf-based exposition format anymore. Currently, all but one of the +[official instrumentation +libraries](https://prometheus.io/docs/instrumenting/clientlibs/) do not expose +the protobuf-based exposition format. The [Go instrumentation +library](https://github.com/prometheus/client_golang), however, has been built +around the protobuf-based data model. As a byproduct thereof, it is still able +to expose the protobuf-based exposition format. The Go instrumentation library +is the only remaining repository within the [Prometheus GitHub +org](https://github.com/prometheus) directly using the prometheus/client_model +repository. -## Go -* [Go](http://golang.org) -* [goprotobuf](https://code.google.com/p/goprotobuf) +Therefore, formerly existing support for languages other than Go (namely C++, +Java, Python, Ruby) has been removed from this repository. If you are a 3rd +party user of those languages, you can go back to [commit +14fe0d1](https://github.com/prometheus/client_model/commit/14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016) +to keep using the old code, or you can consume +[`metrics.proto`](https://github.com/prometheus/client_model/blob/master/metrics.proto) +directly with your own protobuf tooling. Note, however, that changes of +`metrics.proto` after [commit +14fe0d1](https://github.com/prometheus/client_model/commit/14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016) +are solely informed by requirements of the Go instrumentation library and will +not take into account any requirements of other languages or stability concerns +for the protobuf-based exposition format. -## Ruby -* [Ruby](https://www.ruby-lang.org) -* [bundler](https://rubygems.org/gems/bundler) - -# Building - $ make - -# Getting Started - * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go). - * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). +Check out the [OpenMetrics project](https://openmetrics.io/) for the future of +the data model and exposition format used by Prometheus and others. 
diff --git a/vendor/github.com/prometheus/client_model/go.mod b/vendor/github.com/prometheus/client_model/go.mod new file mode 100644 index 0000000000..e7e0a86ccc --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go.mod @@ -0,0 +1,8 @@ +module github.com/prometheus/client_model + +go 1.9 + +require ( + github.com/golang/protobuf v1.2.0 + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect +) diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index b065f8683f..2f4930d9dd 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,34 +1,26 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto -// DO NOT EDIT! -/* -Package io_prometheus_client is a generated protocol buffer package. - -It is generated from these files: - metrics.proto - -It has these top-level messages: - LabelPair - Gauge - Counter - Quantile - Summary - Untyped - Histogram - Bucket - Metric - MetricFamily -*/ package io_prometheus_client -import proto "github.com/golang/protobuf/proto" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + type MetricType int32 const ( @@ -46,6 +38,7 @@ var MetricType_name = map[int32]string{ 3: "UNTYPED", 4: "HISTOGRAM", } + var MetricType_value = map[string]int32{ "COUNTER": 0, "GAUGE": 1, @@ -59,9 +52,11 @@ func (x MetricType) Enum() *MetricType { *p = x return p } + func (x MetricType) String() string { return proto.EnumName(MetricType_name, int32(x)) } + func (x *MetricType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") if err != nil { @@ -71,15 +66,42 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { return nil } +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (m *LabelPair) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo func (m *LabelPair) GetName() string { if m != nil && m.Name != nil { @@ -96,13 +118,36 @@ func (m *LabelPair) GetValue() string { } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{1} +} + +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo func (m *Gauge) GetValue() float64 { if m != nil && m.Value != nil { @@ -112,13 +157,37 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{2} +} + +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo func (m *Counter) GetValue() float64 { if m != nil && m.Value != nil { @@ -127,15 +196,45 @@ func (m *Counter) GetValue() float64 { return 0 } +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" 
json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{3} +} + +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo func (m *Quantile) GetQuantile() float64 { if m != nil && m.Quantile != nil { @@ -152,15 +251,38 @@ func (m *Quantile) GetValue() float64 { } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_unrecognized []byte `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{4} +} + +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo func (m *Summary) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -184,13 +306,36 @@ func (m *Summary) GetQuantile() []*Quantile { } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{5} +} + +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m 
*Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo func (m *Untyped) GetValue() float64 { if m != nil && m.Value != nil { @@ -200,15 +345,38 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_unrecognized []byte `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{6} +} + +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo func (m *Histogram) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -232,14 +400,38 @@ func (m *Histogram) GetBucket() []*Bucket { } type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` - XXX_unrecognized []byte `json:"-"` + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{7} +} + +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (m *Bucket) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo func (m *Bucket) GetCumulativeCount() uint64 { if m != nil && m.CumulativeCount != nil { @@ -255,20 +447,105 @@ func (m *Bucket) GetUpperBound() float64 { return 0 } +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{8} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` - XXX_unrecognized []byte `json:"-"` + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) 
String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{9} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo func (m *Metric) GetLabel() []*LabelPair { if m != nil { @@ -320,16 +597,39 @@ func (m *Metric) GetTimestampMs() int64 { } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{10} +} + +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo func (m *MetricFamily) GetName() string { if m != nil && m.Name != nil { @@ -361,4 +661,63 @@ func (m *MetricFamily) GetMetric() []*Metric { func init() { proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), 
"io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } + +var fileDescriptor_6039342a2ba47b72 = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, + 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, + 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, + 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, + 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, + 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, + 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, + 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, + 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, + 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, + 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, + 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, + 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, + 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, + 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, + 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, + 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, + 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, + 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, + 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, + 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, + 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, + 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, + 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, + 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, + 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, + 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, + 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, + 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, + 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, + 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, + 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, + 0x8d, 0xf6, 0xc0, 
0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, + 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, + 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, + 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, + 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, + 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, + 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, + 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md index 47985e4ad2..4dd257bddb 100644 --- a/vendor/github.com/prometheus/common/README.md +++ b/vendor/github.com/prometheus/common/README.md @@ -6,7 +6,11 @@ components and libraries. * **config**: Common configuration structures * **expfmt**: Decoding and encoding for the exposition format -* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus) * **model**: Shared data structures +* **promlog**: A logging wrapper around [go-kit/log](https://github.com/go-kit/kit/tree/master/log) * **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` +* **server**: Common servers * **version**: Version information and metrics + +## Deprecated +* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus) diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 11839ed65c..bd4e347454 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -30,17 +30,38 @@ type Encoder interface { Encode(*dto.MetricFamily) error } -type encoder func(*dto.MetricFamily) error - -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) +// Closer is implemented by Encoders that need to be closed to finalize +// encoding. (For example, OpenMetrics needs a final `# EOF` line.) +// +// Note that all Encoder implementations returned from this package implement +// Closer, too, even if the Close call is a no-op. This happens in preparation +// for adding a Close method to the Encoder interface directly in a (mildly +// breaking) release in the future. +type Closer interface { + Close() error } -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. +type encoderCloser struct { + encode func(*dto.MetricFamily) error + close func() error +} + +func (ec encoderCloser) Encode(v *dto.MetricFamily) error { + return ec.encode(v) +} + +func (ec encoderCloser) Close() error { + return ec.close() +} + +// Negotiate returns the Content-Type based on the given Accept header. If no +// appropriate accepted type is found, FmtText is returned (which is the +// Prometheus text format). This function will never negotiate FmtOpenMetrics, +// as the support is still experimental. To include the option to negotiate +// FmtOpenMetrics, use NegotiateOpenMetrics. 
func Negotiate(h http.Header) Format { for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer + ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": @@ -51,8 +72,6 @@ func Negotiate(h http.Header) Format { return FmtProtoCompact } } - // Check for text format. - ver := ac.Params["version"] if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } @@ -60,29 +79,84 @@ func Negotiate(h http.Header) Format { return FmtText } -// NewEncoder returns a new encoder based on content type negotiation. +// NegotiateIncludingOpenMetrics works like Negotiate but includes +// FmtOpenMetrics as an option for the result. Note that this function is +// temporary and will disappear once FmtOpenMetrics is fully supported and as +// such may be negotiated by the normal Negotiate function. +func NegotiateIncludingOpenMetrics(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { + return FmtOpenMetrics + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. All +// Encoder implementations returned by NewEncoder also implement Closer, and +// callers should always call the Close method. It is currently only required +// for FmtOpenMetrics, but a future (breaking) release will add the Close method +// to the Encoder interface directly. The current version of the Encoder +// interface is kept for backwards compatibility. 
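A minimal sketch of how a caller is expected to use the negotiation and encoder APIs above; the handler shape, port, and use of the default Gatherer are illustrative assumptions, not part of this change:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// writeMetrics encodes the given families in the negotiated format.
func writeMetrics(w http.ResponseWriter, r *http.Request, families []*dto.MetricFamily) error {
	// Opt in to OpenMetrics while its negotiation is still experimental.
	format := expfmt.NegotiateIncludingOpenMetrics(r.Header)
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format)
	for _, mf := range families {
		if err := enc.Encode(mf); err != nil {
			return err
		}
	}
	// Every encoder returned by NewEncoder also implements Closer; for
	// FmtOpenMetrics, Close writes the required trailing "# EOF" line.
	if closer, ok := enc.(expfmt.Closer); ok {
		return closer.Close()
	}
	return nil
}

func main() {
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		families, err := prometheus.DefaultGatherer.Gather()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// An error here may leave a partially written response; a real
		// handler would log it instead of ignoring it.
		_ = writeMetrics(w, r, families)
	})
	_ = http.ListenAndServe(":8080", nil)
}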
func NewEncoder(w io.Writer, format Format) Encoder { switch format { case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }, + close: func() error { return nil }, + } case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }, + close: func() error { return nil }, + } case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }, + close: func() error { return nil }, + } case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtOpenMetrics: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToOpenMetrics(w, v) + return err + }, + close: func() error { + _, err := FinalizeOpenMetrics(w) + return err + }, + } } - panic("expfmt.NewEncoder: unknown format") + panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) } diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c71bcb9816..0f176fa64f 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,10 +19,12 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion = "0.0.1" // The Content-Type values for the different wire protocols. FmtUnknown Format = `` @@ -30,6 +32,7 @@ const ( FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` FmtProtoText Format = ProtoFmt + ` encoding=text` FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go new file mode 100644 index 0000000000..8a9313a3be --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -0,0 +1,527 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the +// OpenMetrics text format and writes the resulting lines to 'out'. It returns +// the number of bytes written and any error encountered. The output will have +// the same order as the input, no further sorting is performed. Furthermore, +// this function assumes the input is already sanitized and does not perform any +// sanity checks. If the input contains duplicate metrics or invalid metric or +// label names, the conversion will result in invalid text format output. +// +// This function fulfills the type 'expfmt.encoder'. +// +// Note that OpenMetrics requires a final `# EOF` line. Since this function acts +// on individual metric families, it is the responsibility of the caller to +// append this line to 'out' once all metric families have been written. +// Conveniently, this can be done by calling FinalizeOpenMetrics. +// +// The output should be fully OpenMetrics compliant. However, there are a few +// missing features and peculiarities to avoid complications when switching from +// Prometheus to OpenMetrics or vice versa: +// +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. +// +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. +// +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var ( + n int + metricType = in.GetType() + shortName = name + ) + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { + shortName = name[:len(name)-6] + } + + // Comments, first HELP, then TYPE. 
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + switch metricType { + case dto.MetricType_COUNTER: + if strings.HasSuffix(name, "_total") { + n, err = w.WriteString(" counter\n") + } else { + n, err = w.WriteString(" unknown\n") + } + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" unknown\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + // Note that we have ensured above that either the name + // ends on `_total` or that the rendered type is + // `unknown`. Therefore, no `_total` must be added here. + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), 0, false, + metric.Counter.Exemplar, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), 0, false, + nil, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), 0, false, + nil, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeOpenMetricsSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Summary.GetSampleCount(), true, + nil, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + 0, b.GetCumulativeCount(), true, + b.Exemplar, + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), 
+ 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. +func FinalizeOpenMetrics(w io.Writer) (written int, err error) { + return w.Write([]byte("# EOF\n")) +} + +// writeOpenMetricsSample writes a single sample in OpenMetrics text format to +// w, given the metric name, the metric proto message itself, optionally an +// additional label name with a float64 value (use empty string as label name if +// not required), the value (optionally as float64 or uint64, determined by +// useIntValue), and optionally an exemplar (use nil if not required). The +// function returns the number of bytes written and any error encountered. +func writeOpenMetricsSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + floatValue float64, intValue uint64, useIntValue bool, + exemplar *dto.Exemplar, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeOpenMetricsLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + if useIntValue { + n, err = writeUint(w, intValue) + } else { + n, err = writeOpenMetricsFloat(w, floatValue) + } + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly without converting to a float first. + n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) + written += n + if err != nil { + return written, err + } + } + if exemplar != nil { + n, err = writeExemplar(w, exemplar) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float +// in OpenMetrics style. 
+func writeOpenMetricsLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeExemplar writes the provided exemplar in OpenMetrics format to w. The +// function returns the number of bytes written and any error encountered. +func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { + written := 0 + n, err := w.WriteString(" # ") + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, e.GetValue()) + written += n + if err != nil { + return written, err + } + if e.Timestamp != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + ts, err := ptypes.Timestamp((*e).Timestamp) + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting +// number would otherwise contain neither a "." nor an "e". +func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return w.WriteString("1.0") + case f == 0: + return w.WriteString("0.0") + case f == -1: + return w.WriteString("-1.0") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeUint is like writeInt just for uint64. 
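A small, self-contained sketch of the OpenMetrics writer above: it hand-builds a counter family (all names are hypothetical) with one of the newly added exemplars and renders it, finishing with the `# EOF` line required by the format. Per the behaviour documented above, the `_total` suffix is dropped from the `# HELP` / `# TYPE` lines, and the exemplar is appended after the sample, introduced by " # ".

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("example_requests_total"), // hypothetical name
		Help: proto.String("Handled requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{
				Value: proto.Float64(42),
				// Exemplar is new in this client_model version.
				Exemplar: &dto.Exemplar{
					Label: []*dto.LabelPair{{
						Name:  proto.String("trace_id"),
						Value: proto.String("abc123"),
					}},
					Value: proto.Float64(1),
				},
			},
		}},
	}

	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
		panic(err)
	}
	// OpenMetrics requires a final "# EOF" line once all families are written.
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		panic(err)
	}
}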
+func writeUint(w enhancedWriter, u uint64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendUint((*bp)[:0], u, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f11321cd0c..5ba503b065 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -14,13 +14,45 @@ package expfmt import ( + "bufio" "fmt" "io" + "io/ioutil" "math" + "strconv" "strings" + "sync" + + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" +) + +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriter(ioutil.Discard) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } ) // MetricFamilyToText converts a MetricFamily proto message into text format and @@ -32,37 +64,90 @@ import ( // will result in invalid text format output. // // This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) } name := in.GetName() if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) + return 0, fmt.Errorf("MetricFamily has no name: %s", in) } + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + // Comments, first HELP, then TYPE. 
if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) + n, err = w.WriteString("# HELP ") written += n if err != nil { - return written, err + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return } } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) + n, err = w.WriteString("# TYPE ") written += n if err != nil { - return written, err + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return } // Finally the samples, one line for each. @@ -75,9 +160,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Counter.GetValue(), - out, ) case dto.MetricType_GAUGE: if metric.Gauge == nil { @@ -86,9 +170,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Gauge.GetValue(), - out, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { @@ -97,9 +180,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Untyped.GetValue(), - out, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { @@ -109,29 +191,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } for _, q := range metric.Summary.Quantile { n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), q.GetValue(), - out, ) written += n if err != nil { - return written, err + return } } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Summary.GetSampleCount()), - out, ) case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { @@ -140,46 +219,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } infSeen := false - for _, q := range metric.Histogram.Bucket { + for _, b := range metric.Histogram.Bucket { n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), ) written += n if err != nil { - return written, err + return } - if 
math.IsInf(q.GetUpperBound(), +1) { + if math.IsInf(b.GetUpperBound(), +1) { infSeen = true } } if !infSeen { n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), float64(metric.Histogram.GetSampleCount()), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Histogram.GetSampleCount()), - out, ) default: return written, fmt.Errorf( @@ -188,116 +263,203 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } written += n if err != nil { - return written, err + return } } - return written, nil + return } -// writeSample writes a single sample in text format to out, given the metric +// writeSample writes a single sample in text format to w, given the metric // name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. func writeSample( - name string, + w enhancedWriter, + name, suffix string, metric *dto.Metric, - additionalLabelName, additionalLabelValue string, + additionalLabelName string, additionalLabelValue float64, value float64, - out io.Writer, ) (int, error) { var written int - n, err := fmt.Fprint(out, name) + n, err := w.WriteString(name) written += n if err != nil { return written, err } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + if suffix != "" { + n, err = w.WriteString(suffix) written += n if err != nil { return written, err } } - n, err = out.Write([]byte{'\n'}) + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) written += n if err != nil { return written, err } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } return written, nil } -// labelPairsToText converts a slice of LabelPair proto messages plus the +// writeLabelPairs converts a slice of LabelPair proto messages plus the // explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. 
Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( +// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, + additionalLabelName string, additionalLabelValue float64, ) (int, error) { if len(in) == 0 && additionalLabelName == "" { return 0, nil } - var written int - separator := '{' + var ( + written int + separator byte = '{' + ) for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) written += n if err != nil { return written, err } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } separator = ',' } if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) written += n if err != nil { return written, err } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } } - n, err := out.Write([]byte{'}'}) - written += n + err := w.WriteByte('}') + written++ if err != nil { return written, err } return written, nil } +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. var ( - escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) - escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) ) -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { if includeDoubleQuote { - return escapeWithDoubleQuote.Replace(v) + return quotedEscaper.WriteString(w, v) } - - return escape.Replace(v) + return escaper.WriteString(w, v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. 
+func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b86290afa3..342e5940d0 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn { // - Other labels have to be added to currentLabels for signature calculation. if p.currentMF.GetType() == dto.MetricType_SUMMARY { if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn { // Similar special treatment of histograms. if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn { } return p.readingValue default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) return nil } } @@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn { if p.readTokenUntilWhitespace(); p.err != nil { return nil // Unexpected end of input. } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) + value, err := parseFloat(p.currentToken.String()) if err != nil { // Create a more helpful error message. 
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) @@ -755,3 +755,10 @@ func histogramMetricName(name string) string { return name } } + +func parseFloat(s string) (float64, error) { + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/common/go.mod b/vendor/github.com/prometheus/common/go.mod new file mode 100644 index 0000000000..de597e7135 --- /dev/null +++ b/vendor/github.com/prometheus/common/go.mod @@ -0,0 +1,22 @@ +module github.com/prometheus/common + +require ( + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect + github.com/go-kit/kit v0.9.0 + github.com/go-logfmt/logfmt v0.4.0 // indirect + github.com/golang/protobuf v1.3.2 + github.com/julienschmidt/httprouter v1.2.0 + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 + github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_model v0.2.0 + github.com/sirupsen/logrus v1.4.2 + golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect + golang.org/x/sys v0.0.0-20190422165155-953cdadca894 + gopkg.in/alecthomas/kingpin.v2 v2.2.6 + gopkg.in/yaml.v2 v2.2.4 +) + +go 1.11 diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 648b38cb65..26e92288c7 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -1,12 +1,12 @@ /* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index f7250909b9..00804b7fed 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -21,7 +21,6 @@ import ( ) var ( - separator = []byte{0} // MetricNameRE is a regular expression matching valid metric // names. Note that the IsValidMetricName function performs the same // check but faster than a match with this regular expression. diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 74ed5a9f7e..7b0064fdb2 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -43,7 +43,7 @@ const ( // (1970-01-01 00:00 UTC) excluding leap seconds. type Time int64 -// Interval describes and interval between two timestamps. +// Interval describes an interval between two timestamps. 
type Interval struct { Start, End Time } @@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error { return err } - *t = Time(v + va) + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. + if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } default: return fmt.Errorf("invalid time %q", string(b)) diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 2095494719..55d1e3261c 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -1,7 +1,7 @@ # procfs -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. +This package provides functions to retrieve system, kernel, and process +metrics from the pseudo-filesystems /proc and /sys. *WARNING*: This package is a work in progress. Its API may still break in backwards-incompatible ways without warnings. Use it at your own risk. @@ -9,3 +9,53 @@ backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, +/sys, or both. For example, cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Package Organization + +The packages in this project are organized according to (1) whether the data comes from the `/proc` or +`/sys` filesystem and (2) the type of information being retrieved. For example, most process information +can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives +is available in the `blockdevices` sub-package. + +## Building and Testing + +The procfs library is intended to be built as part of another application, so there are no distributable binaries. +However, most of the API includes unit tests which can be run with `make test`. + +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. 
When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. And finally, verify the changes using +`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 0000000000..916c9182a8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "net" + "strings" +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. +func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + ip := net.ParseIP(columns[0]) + mac := net.HardwareAddr(columns[3]) + + entry := ARPEntry{ + IPAddr: ip, + HWAddr: mac, + Device: columns[5], + } + + return entry, nil +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index d3a8268078..10bd067a0a 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -31,19 +31,9 @@ type BuddyInfo struct { Sizes []float64 } -// NewBuddyInfo reads the buddyinfo statistics. -func NewBuddyInfo() ([]BuddyInfo, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewBuddyInfo() -} - -// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. -func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.Path("buddyinfo")) +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
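Aside: parseARPEntry above keeps columns 0, 3 and 5 of a /proc/net/arp data row (six fields per data row, nine in the header). The standalone sketch below walks one hypothetical row through the same column layout; it uses net.ParseMAC for readability, whereas the vendored helper stores the raw column bytes as a net.HardwareAddr. (The BuddyInfo method introduced by the preceding comment continues directly below.)

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// One hypothetical /proc/net/arp data row: IP address, HW type, Flags,
// HW address, Mask, Device (six fields; the header row has nine).
const arpRow = "192.168.1.1  0x1  0x2  00:1a:2b:3c:4d:5e  *  eth0"

func main() {
	cols := strings.Fields(arpRow)
	ip := net.ParseIP(cols[0])
	// net.ParseMAC is used here for readability; the vendored parseARPEntry
	// stores the raw column bytes as a net.HardwareAddr instead.
	mac, err := net.ParseMAC(cols[3])
	if err != nil {
		fmt.Println("bad MAC:", err)
		return
	}
	fmt.Println(ip, mac, cols[5]) // 192.168.1.1 00:1a:2b:3c:4d:5e eth0
}
```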
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 0000000000..2e02215528 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +// CPUInfo returns information about current system CPUs. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +// parseCPUInfo parses data from /proc/cpuinfo +func parseCPUInfo(info []byte) ([]CPUInfo, error) { + cpuinfo := []CPUInfo{} + i := -1 + scanner := bufio.NewScanner(bytes.NewReader(info)) + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) == "" { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu family": + cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + 
cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil + +} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 0000000000..a958933757 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. +type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, fmt.Errorf("error reading crypto %s: %s", path, err) + } + + crypto, err := parseCrypto(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %s: %s", path, err) + } + + return crypto, nil +} + +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. 
+ out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("malformed crypto line: %q", text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. + c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. + c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() +} diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index b6c6b2ce1f..0102ab0fd8 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -14,69 +14,30 @@ package procfs import ( - "fmt" - "os" - "path" - - "github.com/prometheus/procfs/nfs" - "github.com/prometheus/procfs/xfs" + "github.com/prometheus/procfs/internal/fs" ) -// FS represents the pseudo-filesystem proc, which provides an interface to +// FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. -type FS string +type FS struct { + proc fs.FS +} // DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" +const DefaultMountPoint = fs.DefaultProcMountPoint -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. +func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) +} + +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) + fs, err := fs.NewFS(mountPoint) if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + return FS{}, err } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) -} - -// XFSStats retrieves XFS filesystem runtime statistics. 
-func (fs FS) XFSStats() (*xfs.Stats, error) { - f, err := os.Open(fs.Path("fs/xfs/stat")) - if err != nil { - return nil, err - } - defer f.Close() - - return xfs.ParseStats(f) -} - -// NFSClientRPCStats retrieves NFS client RPC statistics. -func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfs")) - if err != nil { - return nil, err - } - defer f.Close() - - return nfs.ParseClientRPCStats(f) -} - -// NFSdServerRPCStats retrieves NFS daemon RPC statistics. -func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfsd")) - if err != nil { - return nil, err - } - defer f.Close() - - return nfs.ParseServerRPCStats(f) + return FS{fs}, nil } diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod new file mode 100644 index 0000000000..ded48253cd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -0,0 +1,9 @@ +module github.com/prometheus/procfs + +go 1.12 + +require ( + github.com/google/go-cmp v0.3.1 + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e + golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e +) diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 0000000000..565e89e42c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. + DefaultSysMountPoint = "/sys" + + // DefaultConfigfsMountPoint is the common mount point of the configfs + DefaultConfigfsMountPoint = "/sys/kernel/config" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) 
+} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 1ad21c91a3..755591d9a5 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -13,7 +13,11 @@ package util -import "strconv" +import ( + "io/ioutil" + "strconv" + "strings" +) // ParseUint32s parses a slice of strings into a slice of uint32s. func ParseUint32s(ss []string) ([]uint32, error) { @@ -44,3 +48,41 @@ func ParseUint64s(ss []string) ([]uint64, error) { return us, nil } + +// ParsePInt64s parses a slice of strings into a slice of int64 pointers. +func ParsePInt64s(ss []string) ([]*int64, error) { + us := make([]*int64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// ParseBool parses a string into a boolean pointer. +func ParseBool(b string) *bool { + var truth bool + switch b { + case "enabled": + truth = true + case "disabled": + truth = false + default: + return nil + } + return &truth +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go new file mode 100644 index 0000000000..8051161b2a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io" + "io/ioutil" + "os" +) + +// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. +// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). +// Reads a max file size of 512kB. For files larger than this, a scanner +// should be used. +func ReadFileNoStat(filename string) ([]byte, error) { + const maxBufferSize = 1024 * 512 + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + reader := io.LimitReader(f, maxBufferSize) + return ioutil.ReadAll(reader) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go new file mode 100644 index 0000000000..c07de0b6c9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,!appengine + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go new file mode 100644 index 0000000000..bd55b45377 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,appengine !linux + +package util + +import ( + "fmt" +) + +// SysReadFile is here implemented as a noop for builds that do not support +// the read syscall. For example Windows, or Linux on Google App Engine. +func SysReadFile(file string) (string, error) { + return "", fmt.Errorf("not supported on this platform") +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go new file mode 100644 index 0000000000..fe2355d3c6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "strconv" +) + +// TODO(mdlayher): util packages are an anti-pattern and this should be moved +// somewhere else that is more focused in the future. + +// A ValueParser enables parsing a single string into a variety of data types +// in a concise and safe way. The Err method must be invoked after invoking +// any other methods to ensure a value was successfully parsed. +type ValueParser struct { + v string + err error +} + +// NewValueParser creates a ValueParser using the input string. +func NewValueParser(v string) *ValueParser { + return &ValueParser{v: v} +} + +// Int interprets the underlying value as an int and returns that value. +func (vp *ValueParser) Int() int { return int(vp.int64()) } + +// PInt64 interprets the underlying value as an int64 and returns a pointer to +// that value. +func (vp *ValueParser) PInt64() *int64 { + if vp.err != nil { + return nil + } + + v := vp.int64() + return &v +} + +// int64 interprets the underlying value as an int64 and returns that value. +// TODO: export if/when necessary. +func (vp *ValueParser) int64() int64 { + if vp.err != nil { + return 0 + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseInt(vp.v, base, 64) + if err != nil { + vp.err = err + return 0 + } + + return v +} + +// PUInt64 interprets the underlying value as an uint64 and returns a pointer to +// that value. +func (vp *ValueParser) PUInt64() *uint64 { + if vp.err != nil { + return nil + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseUint(vp.v, base, 64) + if err != nil { + vp.err = err + return nil + } + + return &v +} + +// Err returns the last error, if any, encountered by the ValueParser. +func (vp *ValueParser) Err() error { + return vp.err +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index e36d4a3bd0..89e447746c 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -24,6 +25,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/util" ) // IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. @@ -62,29 +65,18 @@ type IPVSBackendStatus struct { Weight uint64 } -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - defer file.Close() - - return parseIPVSStats(file) + return parseIPVSStats(bytes.NewReader(data)) } // parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
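Aside: as the ipvs.go hunk above shows, this bump drops the package-level constructors (NewIPVSStats and friends) in favour of methods on an FS value. A minimal sketch of the new call pattern, assuming the standard /proc mount point; the IPVSStats field names are not shown in this hunk, so the result is simply printed with %+v. (The parseIPVSStats function introduced by the preceding comment continues directly below.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Construct an FS once; the removed package-level helpers did this
	// implicitly with the default mount point on every call.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	stats, err := fs.IPVSStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", stats)
}
```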
-func parseIPVSStats(file io.Reader) (IPVSStats, error) { +func parseIPVSStats(r io.Reader) (IPVSStats, error) { var ( statContent []byte statLines []string @@ -92,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(file) + statContent, err := ioutil.ReadAll(r) if err != nil { return IPVSStats{}, err } @@ -131,19 +123,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { return stats, nil } -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 0000000000..00bbe14417 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. +func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. 
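Aside: parseLoad (whose doc comment precedes this note and whose body follows) reads only the first three whitespace-separated fields of /proc/loadavg. A standalone sketch of that contract against a hypothetical loadavg line; the helper name is illustrative.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLoadLine restates the contract of parseLoad: only the first three
// whitespace-separated fields of /proc/loadavg are read.
func parseLoadLine(line string) (load1, load5, load15 float64, err error) {
	parts := strings.Fields(line)
	if len(parts) < 3 {
		return 0, 0, 0, fmt.Errorf("too few fields in loadavg string: %q", line)
	}
	loads := make([]float64, 3)
	for i, p := range parts[:3] {
		if loads[i], err = strconv.ParseFloat(p, 64); err != nil {
			return 0, 0, 0, fmt.Errorf("could not parse load %q: %v", p, err)
		}
	}
	return loads[0], loads[1], loads[2], nil
}

func main() {
	// A hypothetical /proc/loadavg line; the last two fields are ignored.
	l1, l5, l15, err := parseLoadLine("0.02 0.04 0.05 1/497 11947")
	fmt.Println(l1, l5, l15, err) // 0.02 0.04 0.05 <nil>
}
```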
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("could not parse load '%s': %s", load, err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 9dc19583d8..2af3ada180 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -22,8 +22,8 @@ import ( ) var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) ) // MDStat holds info parsed from /proc/mdstat. @@ -34,117 +34,160 @@ type MDStat struct { ActivityState string // Number of active disks. DisksActive int64 - // Total number of disks the device consists of. + // Total number of disks the device requires. DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Spare disks in the device. + DisksSpare int64 // Number of blocks the device holds. BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 } -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. -func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") - for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. 
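Aside: compared with the removed ParseMDStat, the new parseMDStat also derives failed/spare disk counts from the "(F)"/"(S)" suffixes and rewrites the activity state while a recovery or resync is in progress. A standalone sketch of those two rules against a hypothetical /proc/mdstat entry; the sample lines are illustrative only. (The parseMDStat body continues directly below.)

```go
package main

import (
	"fmt"
	"strings"
)

// Two lines of a hypothetical /proc/mdstat device entry.
const (
	deviceLine = "md0 : active raid1 sdc1[2](F) sdb1[1](S) sda1[0]"
	syncLine   = "      [==>..................]  recovery = 12.6% (66959072/523968320) finish=57.9min speed=131300K/sec"
)

func main() {
	// Failed disks carry the (F) suffix and spare disks the (S) suffix.
	failed := strings.Count(deviceLine, "(F)")
	spare := strings.Count(deviceLine, "(S)")

	// The third field is "active" or "inactive"; it is replaced when the
	// following line reports an ongoing recovery or resync.
	state := strings.Fields(deviceLine)[2]
	switch {
	case strings.Contains(syncLine, "recovery"):
		state = "recovering"
	case strings.Contains(syncLine, "resync"):
		state = "resyncing"
	}
	fmt.Println(failed, spare, state) // 1 1 recovering
}
```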
+func parseMDStat(mdStatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { continue } - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) } - mdName := mainLine[0] - activityState := mainLine[2] + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, + return nil, fmt.Errorf( + "error parsing %s: too few lines for md device", mdName, ) } - active, total, size, err := evalStatusline(lines[i+1]) + // Failed disks have the suffix (F) & Spare disks have the suffix (S). + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, fmt.Errorf("error parsing md device lines: %s", err) } - // j is the line number of the syncing-line. - j := i + 2 + syncLineIdx := i + 2 if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 + syncLineIdx++ } // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + + // Append recovery and resyncing state info. + if recovering || resyncing { + if recovering { + state = "recovering" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. 
+ if strings.Contains(lines[syncLineIdx], "PENDING") || + strings.Contains(lines[syncLineIdx], "DELAYED") { + syncedBlocks = 0 + } else { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err) + } } } - mdStates = append(mdStates, MDStat{ + mdStats = append(mdStats, MDStat{ Name: mdName, - ActivityState: activityState, + ActivityState: state, DisksActive: active, + DisksFailed: fail, + DisksSpare: spare, DisksTotal: total, BlocksTotal: size, BlocksSynced: syncedBlocks, }) } - return mdStates, nil + return mdStats, nil } -func evalStatusline(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) +func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { + + sizeStr := strings.Fields(statusLine)[0] + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) } - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. + total = int64(strings.Count(deviceLine, "[")) + return total, total, size, nil + } + + if strings.Contains(deviceLine, "inactive") { + return 0, 0, size, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLine) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) } return active, total, size, nil } -func evalBuildline(buildline string) (syncedBlocks int64, err error) { - matches := buildlineRE.FindStringSubmatch(buildline) +func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { + matches := recoveryLineRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) + return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine) } return syncedBlocks, nil diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go new file mode 100644 index 0000000000..50dab4bcd5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -0,0 +1,277 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Meminfo represents memory statistics. +type Meminfo struct { + // Total usable ram (i.e. physical ram minus a few reserved + // bits and the kernel binary code) + MemTotal uint64 + // The sum of LowFree+HighFree + MemFree uint64 + // An estimate of how much memory is available for starting + // new applications, without swapping. Calculated from + // MemFree, SReclaimable, the size of the file LRU lists, and + // the low watermarks in each zone. The estimate takes into + // account that the system needs some page cache to function + // well, and that not all reclaimable slab will be + // reclaimable, due to items being in use. The impact of those + // factors will vary from system to system. + MemAvailable uint64 + // Relatively temporary storage for raw disk blocks shouldn't + // get tremendously large (20MB or so) + Buffers uint64 + Cached uint64 + // Memory that once was swapped out, is swapped back in but + // still also is in the swapfile (if memory is needed it + // doesn't need to be swapped out AGAIN because it is already + // in the swapfile. This saves I/O) + SwapCached uint64 + // Memory that has been used more recently and usually not + // reclaimed unless absolutely necessary. + Active uint64 + // Memory which has been less recently used. It is more + // eligible to be reclaimed for other purposes + Inactive uint64 + ActiveAnon uint64 + InactiveAnon uint64 + ActiveFile uint64 + InactiveFile uint64 + Unevictable uint64 + Mlocked uint64 + // total amount of swap space available + SwapTotal uint64 + // Memory which has been evicted from RAM, and is temporarily + // on the disk + SwapFree uint64 + // Memory which is waiting to get written back to the disk + Dirty uint64 + // Memory which is actively being written back to the disk + Writeback uint64 + // Non-file backed pages mapped into userspace page tables + AnonPages uint64 + // files which have been mapped, such as libraries + Mapped uint64 + Shmem uint64 + // in-kernel data structures cache + Slab uint64 + // Part of Slab, that might be reclaimed, such as caches + SReclaimable uint64 + // Part of Slab, that cannot be reclaimed on memory pressure + SUnreclaim uint64 + KernelStack uint64 + // amount of memory dedicated to the lowest level of page + // tables. + PageTables uint64 + // NFS pages sent to the server, but not yet committed to + // stable storage + NFSUnstable uint64 + // Memory used for block device "bounce buffers" + Bounce uint64 + // Memory used by FUSE for temporary writeback buffers + WritebackTmp uint64 + // Based on the overcommit ratio ('vm.overcommit_ratio'), + // this is the total amount of memory currently available to + // be allocated on the system. This limit is only adhered to + // if strict overcommit accounting is enabled (mode 2 in + // 'vm.overcommit_memory'). 
+ // The CommitLimit is calculated with the following formula: + // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * + // overcommit_ratio / 100 + [total swap pages] + // For example, on a system with 1G of physical RAM and 7G + // of swap with a `vm.overcommit_ratio` of 30 it would + // yield a CommitLimit of 7.3G. + // For more details, see the memory overcommit documentation + // in vm/overcommit-accounting. + CommitLimit uint64 + // The amount of memory presently allocated on the system. + // The committed memory is a sum of all of the memory which + // has been allocated by processes, even if it has not been + // "used" by them as of yet. A process which malloc()'s 1G + // of memory, but only touches 300M of it will show up as + // using 1G. This 1G is memory which has been "committed" to + // by the VM and can be used at any time by the allocating + // application. With strict overcommit enabled on the system + // (mode 2 in 'vm.overcommit_memory'),allocations which would + // exceed the CommitLimit (detailed above) will not be permitted. + // This is useful if one needs to guarantee that processes will + // not fail due to lack of memory once that memory has been + // successfully allocated. + CommittedAS uint64 + // total size of vmalloc memory area + VmallocTotal uint64 + // amount of vmalloc area which is used + VmallocUsed uint64 + // largest contiguous block of vmalloc area which is free + VmallocChunk uint64 + HardwareCorrupted uint64 + AnonHugePages uint64 + ShmemHugePages uint64 + ShmemPmdMapped uint64 + CmaTotal uint64 + CmaFree uint64 + HugePagesTotal uint64 + HugePagesFree uint64 + HugePagesRsvd uint64 + HugePagesSurp uint64 + Hugepagesize uint64 + DirectMap4k uint64 + DirectMap2M uint64 + DirectMap1G uint64 +} + +// Meminfo returns an information about current kernel/system memory statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Meminfo() (Meminfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) + if err != nil { + return Meminfo{}, err + } + + m, err := parseMemInfo(bytes.NewReader(b)) + if err != nil { + return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err) + } + + return *m, nil +} + +func parseMemInfo(r io.Reader) (*Meminfo, error) { + var m Meminfo + s := bufio.NewScanner(r) + for s.Scan() { + // Each line has at least a name and value; we ignore the unit. 
+ fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + } + + v, err := strconv.ParseUint(fields[1], 0, 64) + if err != nil { + return nil, err + } + + switch fields[0] { + case "MemTotal:": + m.MemTotal = v + case "MemFree:": + m.MemFree = v + case "MemAvailable:": + m.MemAvailable = v + case "Buffers:": + m.Buffers = v + case "Cached:": + m.Cached = v + case "SwapCached:": + m.SwapCached = v + case "Active:": + m.Active = v + case "Inactive:": + m.Inactive = v + case "Active(anon):": + m.ActiveAnon = v + case "Inactive(anon):": + m.InactiveAnon = v + case "Active(file):": + m.ActiveFile = v + case "Inactive(file):": + m.InactiveFile = v + case "Unevictable:": + m.Unevictable = v + case "Mlocked:": + m.Mlocked = v + case "SwapTotal:": + m.SwapTotal = v + case "SwapFree:": + m.SwapFree = v + case "Dirty:": + m.Dirty = v + case "Writeback:": + m.Writeback = v + case "AnonPages:": + m.AnonPages = v + case "Mapped:": + m.Mapped = v + case "Shmem:": + m.Shmem = v + case "Slab:": + m.Slab = v + case "SReclaimable:": + m.SReclaimable = v + case "SUnreclaim:": + m.SUnreclaim = v + case "KernelStack:": + m.KernelStack = v + case "PageTables:": + m.PageTables = v + case "NFS_Unstable:": + m.NFSUnstable = v + case "Bounce:": + m.Bounce = v + case "WritebackTmp:": + m.WritebackTmp = v + case "CommitLimit:": + m.CommitLimit = v + case "Committed_AS:": + m.CommittedAS = v + case "VmallocTotal:": + m.VmallocTotal = v + case "VmallocUsed:": + m.VmallocUsed = v + case "VmallocChunk:": + m.VmallocChunk = v + case "HardwareCorrupted:": + m.HardwareCorrupted = v + case "AnonHugePages:": + m.AnonHugePages = v + case "ShmemHugePages:": + m.ShmemHugePages = v + case "ShmemPmdMapped:": + m.ShmemPmdMapped = v + case "CmaTotal:": + m.CmaTotal = v + case "CmaFree:": + m.CmaFree = v + case "HugePages_Total:": + m.HugePagesTotal = v + case "HugePages_Free:": + m.HugePagesFree = v + case "HugePages_Rsvd:": + m.HugePagesRsvd = v + case "HugePages_Surp:": + m.HugePagesSurp = v + case "Hugepagesize:": + m.Hugepagesize = v + case "DirectMap4k:": + m.DirectMap4k = v + case "DirectMap2M:": + m.DirectMap2M = v + case "DirectMap1G:": + m.DirectMap1G = v + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go new file mode 100644 index 0000000000..9471136101 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -0,0 +1,180 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A MountInfo is a type that describes the details, options +// for each mount, parsed from /proc/self/mountinfo. +// The fields described in each entry of /proc/self/mountinfo +// is described in the following man page. 
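A short sketch of the new Meminfo accessor; the field names follow the struct above, and the values are the raw numbers from /proc/meminfo (the parser ignores the unit column). NewFS is assumed as before:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	// Values are as reported by the kernel, typically kibibytes.
	fmt.Printf("MemTotal=%d MemFree=%d MemAvailable=%d\n",
		mi.MemTotal, mi.MemFree, mi.MemAvailable)
}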
+// http://man7.org/linux/man-pages/man5/proc.5.html +type MountInfo struct { + // Unique ID for the mount + MountID int + // The ID of the parent mount + ParentID int + // The value of `st_dev` for the files on this FS + MajorMinorVer string + // The pathname of the directory in the FS that forms + // the root for this mount + Root string + // The pathname of the mount point relative to the root + MountPoint string + // Mount options + Options map[string]string + // Zero or more optional fields + OptionalFields map[string]string + // The Filesystem type + FSType string + // FS specific information or "none" + Source string + // Superblock options + SuperOptions map[string]string +} + +// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. +func parseMountInfo(info []byte) ([]*MountInfo, error) { + mounts := []*MountInfo{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseMountInfoString(mountString) + if err != nil { + return nil, err + } + mounts = append(mounts, parsedMounts) + } + + err := scanner.Err() + return mounts, err +} + +// Parses a mountinfo file line, and converts it to a MountInfo struct. +// An important check here is to see if the hyphen separator, as if it does not exist, +// it means that the line is malformed. +func parseMountInfoString(mountString string) (*MountInfo, error) { + var err error + + mountInfo := strings.Split(mountString, " ") + mountInfoLength := len(mountInfo) + if mountInfoLength < 11 { + return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + } + + if mountInfo[mountInfoLength-4] != "-" { + return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + } + + mount := &MountInfo{ + MajorMinorVer: mountInfo[2], + Root: mountInfo[3], + MountPoint: mountInfo[4], + Options: mountOptionsParser(mountInfo[5]), + OptionalFields: nil, + FSType: mountInfo[mountInfoLength-3], + Source: mountInfo[mountInfoLength-2], + SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), + } + + mount.MountID, err = strconv.Atoi(mountInfo[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse mount ID") + } + mount.ParentID, err = strconv.Atoi(mountInfo[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse parent ID") + } + // Has optional fields, which is a space separated list of values. + // Example: shared:2 master:7 + if mountInfo[6] != "" { + mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) + if err != nil { + return nil, err + } + } + return mount, nil +} + +// mountOptionsIsValidField checks a string against a valid list of optional fields keys. +func mountOptionsIsValidField(s string) bool { + switch s { + case + "shared", + "master", + "propagate_from", + "unbindable": + return true + } + return false +} + +// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. +func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { + optionalFields := make(map[string]string) + for _, field := range o { + optionSplit := strings.SplitN(field, ":", 2) + value := "" + if len(optionSplit) == 2 { + value = optionSplit[1] + } + if mountOptionsIsValidField(optionSplit[0]) { + optionalFields[optionSplit[0]] = value + } + } + return optionalFields, nil +} + +// Parses the mount options, superblock options. 
+func mountOptionsParser(mountOptions string) map[string]string { + opts := make(map[string]string) + options := strings.Split(mountOptions, ",") + for _, opt := range options { + splitOption := strings.Split(opt, "=") + if len(splitOption) < 2 { + key := splitOption[0] + opts[key] = "" + } else { + key, value := splitOption[0], splitOption[1] + opts[key] = value + } + } + return opts +} + +// Retrieves mountinfo information from `/proc/self/mountinfo`. +func GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat("/proc/self/mountinfo") + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// Retrieves mountinfo information from a processes' `/proc//mountinfo`. +func GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index e95ddbc67c..35b2ef3513 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -39,8 +39,11 @@ const ( statVersion10 = "1.0" statVersion11 = "1.1" - fieldTransport10Len = 10 - fieldTransport11Len = 13 + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -66,6 +69,8 @@ type MountStats interface { type MountStatsNFS struct { // The version of statistics provided. StatVersion string + // The mount options of the NFS mount. + Opts map[string]string // The age of the NFS mount. Age time.Duration // Statistics related to byte counters for various operations. @@ -176,16 +181,18 @@ type NFSOperationStats struct { // Number of bytes received for this operation, including RPC headers and payload. BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration + CumulativeQueueMilliseconds uint64 // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration + CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration + CumulativeTotalRequestMilliseconds uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and // responses. type NFSTransportStats struct { + // The transport protocol used for the NFS mount. + Protocol string // The local port used for the NFS mount. Port uint64 // Number of times the client has had to establish a connection from scratch @@ -197,7 +204,7 @@ type NFSTransportStats struct { // spent waiting for connections to the server to be established. ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration + IdleTimeSeconds uint64 // Number of RPC requests for this mount sent to the NFS server. Sends uint64 // Number of RPC responses for this mount received from the NFS server. 
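The new mountinfo support above exposes GetMounts (reading /proc/self/mountinfo) and GetProcMounts, with mount and superblock options parsed into maps. A usage sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Flag-style options such as "rw" or "ro" map to empty string values.
		_, readOnly := m.Options["ro"]
		fmt.Printf("%-30s type=%-8s source=%s ro=%v\n", m.MountPoint, m.FSType, m.Source, readOnly)
	}
}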
@@ -312,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { // Field indicators for parsing specific types of data const ( + fieldOpts = "opts:" fieldAge = "age:" fieldBytes = "bytes:" fieldEvents = "events:" @@ -333,6 +341,18 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } switch ss[0] { + case fieldOpts: + if stats.Opts == nil { + stats.Opts = map[string]string{} + } + for _, opt := range strings.Split(ss[1], ",") { + split := strings.Split(opt, "=") + if len(split) == 2 { + stats.Opts[split[0]] = split[1] + } else { + stats.Opts[opt] = "" + } + } case fieldAge: // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -360,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) } - tstats, err := parseNFSTransportStats(ss[2:], statVersion) + tstats, err := parseNFSTransportStats(ss[1:], statVersion) if err != nil { return nil, err } @@ -504,15 +524,15 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], }) } @@ -522,13 +542,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { // parseNFSTransportStats parses a NFSTransportStats line using an input set of // integer fields matched to a specific stats version. func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + switch statVersion { case statVersion10: - if len(ss) != fieldTransport10Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) } case statVersion11: - if len(ss) != fieldTransport11Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) } default: @@ -536,12 +576,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. + // in a v1.0 response. 
Since the stat length is bigger for TCP stats, we use + // the TCP length here. // // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11Len) + ns := make([]uint64, fieldTransport11TCPLen) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -551,12 +592,23 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats ns[i] = n } + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + return &NFSTransportStats{ + Protocol: protocol, Port: ns[0], Bind: ns[1], Connect: ns[2], ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, + IdleTimeSeconds: ns[4], Sends: ns[5], Receives: ns[6], BadTransactionIDs: ns[7], diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go new file mode 100644 index 0000000000..1e27c83d50 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -0,0 +1,153 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A ConntrackStatEntry represents one line from net/stat/nf_conntrack +// and contains netfilter conntrack statistics at one CPU core +type ConntrackStatEntry struct { + Entries uint64 + Found uint64 + Invalid uint64 + Ignore uint64 + Insert uint64 + InsertFailed uint64 + Drop uint64 + EarlyDrop uint64 + SearchRestart uint64 +} + +// Retrieves netfilter's conntrack statistics, split by CPU cores +func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { + return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) +} + +// Parses a slice of ConntrackStatEntries from the given filepath +func readConntrackStat(path string) ([]ConntrackStatEntry, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(path) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. 
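The mountstats changes above add the NFS mount options, a transport protocol field, and replace the time.Duration fields with plain millisecond/second counters. A sketch of reading them, assuming Proc.MountStats and the package's Self helper, and that the concrete type stored in Mount.Stats is *MountStatsNFS as the parser suggests:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats() // parses /proc/self/mountstats
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS) // only NFS mounts carry extended stats
		if !ok {
			continue
		}
		fmt.Printf("%s on %s: proto=%s idle=%ds opts=%v\n",
			m.Device, m.Mount, nfs.Transport.Protocol, nfs.Transport.IdleTimeSeconds, nfs.Opts)
	}
}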
+ return nil, err + } + + stat, err := parseConntrackStat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err) + } + + return stat, nil +} + +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { + var entries []ConntrackStatEntry + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + conntrackEntry, err := parseConntrackStatEntry(fields) + if err != nil { + return nil, err + } + entries = append(entries, *conntrackEntry) + } + + return entries, nil +} + +// Parses a ConntrackStatEntry from given array of fields +func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { + if len(fields) != 17 { + return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") + } + entry := &ConntrackStatEntry{} + + entries, err := parseConntrackStatField(fields[0]) + if err != nil { + return nil, err + } + entry.Entries = entries + + found, err := parseConntrackStatField(fields[2]) + if err != nil { + return nil, err + } + entry.Found = found + + invalid, err := parseConntrackStatField(fields[4]) + if err != nil { + return nil, err + } + entry.Invalid = invalid + + ignore, err := parseConntrackStatField(fields[5]) + if err != nil { + return nil, err + } + entry.Ignore = ignore + + insert, err := parseConntrackStatField(fields[8]) + if err != nil { + return nil, err + } + entry.Insert = insert + + insertFailed, err := parseConntrackStatField(fields[9]) + if err != nil { + return nil, err + } + entry.InsertFailed = insertFailed + + drop, err := parseConntrackStatField(fields[10]) + if err != nil { + return nil, err + } + entry.Drop = drop + + earlyDrop, err := parseConntrackStatField(fields[11]) + if err != nil { + return nil, err + } + entry.EarlyDrop = earlyDrop + + searchRestart, err := parseConntrackStatField(fields[16]) + if err != nil { + return nil, err + } + entry.SearchRestart = searchRestart + + return entry, nil +} + +// Parses a uint64 from given hex in string +func parseConntrackStatField(field string) (uint64, error) { + val, err := strconv.ParseUint(field, 16, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err) + } + return val, err +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index 3f2523371a..47a710befb 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -47,23 +47,13 @@ type NetDevLine struct { // are interface names. type NetDev map[string]NetDevLine -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func NewNetDev() (NetDev, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetDev() +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) } -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NewNetDev() (NetDev, error) { - return newNetDev(fs.Path("net/dev")) -} - -// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NewNetDev() (NetDev, error) { +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. 
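Sketch for the new ConntrackStat accessor above; it returns one entry per CPU core, with the hexadecimal counters from net/stat/nf_conntrack decoded into uint64 fields:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.ConntrackStat()
	if err != nil {
		// The file is absent when the conntrack module is not loaded.
		log.Fatal(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: entries=%d found=%d invalid=%d drop=%d early_drop=%d\n",
			cpu, e.Entries, e.Found, e.Invalid, e.Drop, e.EarlyDrop)
	}
}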
+func (p Proc) NetDev() (NetDev, error) { return newNetDev(p.path("net/dev")) } @@ -75,7 +65,7 @@ func newNetDev(file string) (NetDev, error) { } defer f.Close() - nd := NetDev{} + netDev := NetDev{} s := bufio.NewScanner(f) for n := 0; s.Scan(); n++ { // Skip the 2 header lines. @@ -83,20 +73,20 @@ func newNetDev(file string) (NetDev, error) { continue } - line, err := nd.parseLine(s.Text()) + line, err := netDev.parseLine(s.Text()) if err != nil { - return nd, err + return netDev, err } - nd[line.Name] = *line + netDev[line.Name] = *line } - return nd, s.Err() + return netDev, s.Err() } // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. -func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { +func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { parts := strings.SplitN(rawLine, ":", 2) if len(parts) != 2 { return nil, errors.New("invalid net/dev line, missing colon") @@ -185,15 +175,14 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { // Total aggregates the values across interfaces and returns a new NetDevLine. // The Name field will be a sorted comma separated list of interface names. -func (nd NetDev) Total() NetDevLine { +func (netDev NetDev) Total() NetDevLine { total := NetDevLine{} - names := make([]string, 0, len(nd)) - for _, ifc := range nd { + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { names = append(names, ifc.Name) total.RxBytes += ifc.RxBytes total.RxPackets += ifc.RxPackets - total.RxPackets += ifc.RxPackets total.RxErrors += ifc.RxErrors total.RxDropped += ifc.RxDropped total.RxFIFO += ifc.RxFIFO diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 0000000000..f91ef55237 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,163 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. +type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. 
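The net/dev hunk above renames the accessors (NewNetDev becomes NetDev on both FS and Proc) and fixes Total so RxPackets is no longer added twice. A sketch of the renamed call:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nd, err := fs.NetDev() // previously fs.NewNetDev()
	if err != nil {
		log.Fatal(err)
	}
	total := nd.Total() // Name is a sorted, comma-separated list of the aggregated interfaces
	fmt.Printf("%s: rx=%d bytes tx=%d bytes\n", total.Name, total.RxBytes, total.TxBytes)
}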
+// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. +func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + } + + // The remaining fields are key/value pairs. + kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, errors.New("odd number of fields in key/value pairs") + } + + // Iterate two values at a time to gather key/value pairs. + out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. +func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. + v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 0000000000..db5debdf4a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
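Sketch for the new sockstat readers above; Used is only set for IPv4, and most per-protocol counters besides InUse are optional pointers:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat() // fs.NetSockstat6() for IPv6; check os.IsNotExist if IPv6 is disabled
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil {
		fmt.Printf("sockets used: %d\n", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s: inuse=%d\n", p.Protocol, p.InUse)
	}
}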
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// For the proc file format details, +// See: +// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 +// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. + +// SoftnetStat contains a single row of data from /proc/net/softnet_stat +type SoftnetStat struct { + // Number of processed packets + Processed uint32 + // Number of dropped packets + Dropped uint32 + // Number of times processing packets ran out of quota + TimeSqueezed uint32 +} + +var softNetProcFile = "net/softnet_stat" + +// NetSoftnetStat reads data from /proc/net/softnet_stat. +func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err) + } + + return entries, nil +} + +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const minColumns = 9 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + for s.Scan() { + columns := strings.Fields(s.Text()) + width := len(columns) + + if width < minColumns { + return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + } + + // We only parse the first three columns at the moment. + us, err := parseHexUint32s(columns[0:3]) + if err != nil { + return nil, err + } + + stats = append(stats, SoftnetStat{ + Processed: us[0], + Dropped: us[1], + TimeSqueezed: us[2], + }) + } + + return stats, nil +} + +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 0000000000..d017e3f18d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,229 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
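Sketch for NetSoftnetStat above, which currently decodes the first three hexadecimal columns of /proc/net/softnet_stat, one row per CPU:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, s := range stats {
		fmt.Printf("cpu%d: processed=%d dropped=%d time_squeeze=%d\n",
			cpu, s.Processed, s.Dropped, s.TimeSqueezed)
	}
}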
+ +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netUDPLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netUDPLine represents the fields parsed from a single line + // in /proc/net/udp{,6}. Fields which are not used by UDP are skipped. + // For the proc file format details, see https://linux.die.net/man/5/proc. + netUDPLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + } +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. +func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. +func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. +func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. +func newNetUDP(file string) (NetUDP, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDP := NetUDP{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDP = append(netUDP, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDP, nil +} + +// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file. 
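Sketch for the UDP accessors defined above; NetUDPSummary aggregates queue lengths and the socket count, while NetUDP returns the individual parsed lines:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetUDPSummary() // fs.NetUDP6Summary() reads /proc/net/udp6
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("udp: sockets=%d txq=%d rxq=%d\n", sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)

	socks, err := fs.NetUDP()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range socks {
		fmt.Printf("%v:%d -> %v:%d uid=%d\n", s.LocalAddr, s.LocalPort, s.RemAddr, s.RemPort, s.UID)
	}
}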
+func newNetUDPSummary(file string) (*NetUDPSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDPSummary := &NetUDPSummary{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDPSummary.TxQueueLength += line.TxQueue + netUDPSummary.RxQueueLength += line.RxQueue + netUDPSummary.UsedSockets++ + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDPSummary, nil +} + +// parseNetUDPLine parses a single line, represented by a list of fields. +func parseNetUDPLine(fields []string) (*netUDPLine, error) { + line := &netUDPLine{} + if len(fields) < 8 { + return nil, fmt.Errorf( + "cannot parse net udp socket line as it has less then 8 columns: %s", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf( + "cannot parse sl field in udp socket line: %s", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf( + "cannot parse local_address field in udp socket line: %s", fields[1]) + } + if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address value in udp socket line: %s", err) + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address port value in udp socket line: %s", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf( + "cannot parse rem_address field in udp socket line: %s", fields[1]) + } + if line.RemAddr, err = hex.DecodeString(r[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address value in udp socket line: %s", err) + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address port value in udp socket line: %s", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse st value in udp socket line: %s", err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in udp socket line as it has a missing colon: %s", + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse uid value in udp socket line: %s", err) + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 0000000000..c55b4b18e4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,257 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? +const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 + +// NetUNIXFlags is the type of the flags field. +type NetUNIXFlags uint64 + +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 + +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState + Inode uint64 + Path string +} + +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine +} + +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) +} + +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + return parseNetUNIX(f) +} + +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. + s := bufio.NewScanner(r) + s.Scan() + + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. This code works for both cases. 
+ hasInode := strings.Contains(s.Text(), "Inode") + + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 + if hasInode { + minFields++ + } + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err) + } + + nu.Rows = append(nu.Rows, item) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err) + } + + return &nu, nil +} + +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { + fields := strings.Fields(line) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + } + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err) + } + + flags, err := u.parseFlags(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err) + } + + typ, err := u.parseType(fields[4]) + if err != nil { + return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err) + } + + state, err := u.parseState(fields[5]) + if err != nil { + return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err) + } + + var inode uint64 + if hasInode { + inode, err = u.parseInode(fields[6]) + if err != nil { + return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err) + } + } + + n := &NetUNIXLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: state, + Inode: inode, + } + + // Path field is optional. + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. 
+ pathIdx := 7 + if !hasInode { + pathIdx-- + } + + n.Path = fields[pathIdx] + } + + return n, nil +} + +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) +} + +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) + if err != nil { + return 0, err + } + + return NetUNIXType(typ), nil +} + +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return 0, err + } + + return NetUNIXFlags(flags), nil +} + +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) + if err != nil { + return 0, err + } + + return NetUNIXState(st), nil +} + +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func (t NetUNIXType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUNIXFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUNIXState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go deleted file mode 100644 index 651bf68195..0000000000 --- a/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nfs implements parsing of /proc/net/rpc/nfsd. -// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ -package nfs - -// ReplyCache models the "rc" line. -type ReplyCache struct { - Hits uint64 - Misses uint64 - NoCache uint64 -} - -// FileHandles models the "fh" line. -type FileHandles struct { - Stale uint64 - TotalLookups uint64 - AnonLookups uint64 - DirNoCache uint64 - NoDirNoCache uint64 -} - -// InputOutput models the "io" line. -type InputOutput struct { - Read uint64 - Write uint64 -} - -// Threads models the "th" line. -type Threads struct { - Threads uint64 - FullCnt uint64 -} - -// ReadAheadCache models the "ra" line. -type ReadAheadCache struct { - CacheSize uint64 - CacheHistogram []uint64 - NotFound uint64 -} - -// Network models the "net" line. -type Network struct { - NetCount uint64 - UDPCount uint64 - TCPCount uint64 - TCPConnect uint64 -} - -// ClientRPC models the nfs "rpc" line. -type ClientRPC struct { - RPCCount uint64 - Retransmissions uint64 - AuthRefreshes uint64 -} - -// ServerRPC models the nfsd "rpc" line. 
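Sketch for the new /proc/net/unix reader above; Type, Flags, and State have String methods, and Inode and Path are populated when present in the file:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		fmt.Printf("inode=%d type=%s state=%s path=%s\n", row.Inode, row.Type, row.State, row.Path)
	}
}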
-type ServerRPC struct { - RPCCount uint64 - BadCnt uint64 - BadFmt uint64 - BadAuth uint64 - BadcInt uint64 -} - -// V2Stats models the "proc2" line. -type V2Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Root uint64 - Lookup uint64 - ReadLink uint64 - Read uint64 - WrCache uint64 - Write uint64 - Create uint64 - Remove uint64 - Rename uint64 - Link uint64 - SymLink uint64 - MkDir uint64 - RmDir uint64 - ReadDir uint64 - FsStat uint64 -} - -// V3Stats models the "proc3" line. -type V3Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Lookup uint64 - Access uint64 - ReadLink uint64 - Read uint64 - Write uint64 - Create uint64 - MkDir uint64 - SymLink uint64 - MkNod uint64 - Remove uint64 - RmDir uint64 - Rename uint64 - Link uint64 - ReadDir uint64 - ReadDirPlus uint64 - FsStat uint64 - FsInfo uint64 - PathConf uint64 - Commit uint64 -} - -// ClientV4Stats models the nfs "proc4" line. -type ClientV4Stats struct { - Null uint64 - Read uint64 - Write uint64 - Commit uint64 - Open uint64 - OpenConfirm uint64 - OpenNoattr uint64 - OpenDowngrade uint64 - Close uint64 - Setattr uint64 - FsInfo uint64 - Renew uint64 - SetClientID uint64 - SetClientIDConfirm uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Access uint64 - Getattr uint64 - Lookup uint64 - LookupRoot uint64 - Remove uint64 - Rename uint64 - Link uint64 - Symlink uint64 - Create uint64 - Pathconf uint64 - StatFs uint64 - ReadLink uint64 - ReadDir uint64 - ServerCaps uint64 - DelegReturn uint64 - GetACL uint64 - SetACL uint64 - FsLocations uint64 - ReleaseLockowner uint64 - Secinfo uint64 - FsidPresent uint64 - ExchangeID uint64 - CreateSession uint64 - DestroySession uint64 - Sequence uint64 - GetLeaseTime uint64 - ReclaimComplete uint64 - LayoutGet uint64 - GetDeviceInfo uint64 - LayoutCommit uint64 - LayoutReturn uint64 - SecinfoNoName uint64 - TestStateID uint64 - FreeStateID uint64 - GetDeviceList uint64 - BindConnToSession uint64 - DestroyClientID uint64 - Seek uint64 - Allocate uint64 - DeAllocate uint64 - LayoutStats uint64 - Clone uint64 -} - -// ServerV4Stats models the nfsd "proc4" line. -type ServerV4Stats struct { - Null uint64 - Compound uint64 -} - -// V4Ops models the "proc4ops" line: NFSv4 operations -// Variable list, see: -// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) -// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) -// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) -type V4Ops struct { - //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? - Op0Unused uint64 - Op1Unused uint64 - Op2Future uint64 - Access uint64 - Close uint64 - Commit uint64 - Create uint64 - DelegPurge uint64 - DelegReturn uint64 - GetAttr uint64 - GetFH uint64 - Link uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Lookup uint64 - LookupRoot uint64 - Nverify uint64 - Open uint64 - OpenAttr uint64 - OpenConfirm uint64 - OpenDgrd uint64 - PutFH uint64 - PutPubFH uint64 - PutRootFH uint64 - Read uint64 - ReadDir uint64 - ReadLink uint64 - Remove uint64 - Rename uint64 - Renew uint64 - RestoreFH uint64 - SaveFH uint64 - SecInfo uint64 - SetAttr uint64 - Verify uint64 - Write uint64 - RelLockOwner uint64 -} - -// ClientRPCStats models all stats from /proc/net/rpc/nfs. -type ClientRPCStats struct { - Network Network - ClientRPC ClientRPC - V2Stats V2Stats - V3Stats V3Stats - ClientV4Stats ClientV4Stats -} - -// ServerRPCStats models all stats from /proc/net/rpc/nfsd. 
-type ServerRPCStats struct { - ReplyCache ReplyCache - FileHandles FileHandles - InputOutput InputOutput - Threads Threads - ReadAheadCache ReadAheadCache - Network Network - ServerRPC ServerRPC - V2Stats V2Stats - V3Stats V3Stats - ServerV4Stats ServerV4Stats - V4Ops V4Ops -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go deleted file mode 100644 index 95a83cc5bc..0000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nfs - -import ( - "fmt" -) - -func parseReplyCache(v []uint64) (ReplyCache, error) { - if len(v) != 3 { - return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) - } - - return ReplyCache{ - Hits: v[0], - Misses: v[1], - NoCache: v[2], - }, nil -} - -func parseFileHandles(v []uint64) (FileHandles, error) { - if len(v) != 5 { - return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) - } - - return FileHandles{ - Stale: v[0], - TotalLookups: v[1], - AnonLookups: v[2], - DirNoCache: v[3], - NoDirNoCache: v[4], - }, nil -} - -func parseInputOutput(v []uint64) (InputOutput, error) { - if len(v) != 2 { - return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) - } - - return InputOutput{ - Read: v[0], - Write: v[1], - }, nil -} - -func parseThreads(v []uint64) (Threads, error) { - if len(v) != 2 { - return Threads{}, fmt.Errorf("invalid Threads line %q", v) - } - - return Threads{ - Threads: v[0], - FullCnt: v[1], - }, nil -} - -func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { - if len(v) != 12 { - return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) - } - - return ReadAheadCache{ - CacheSize: v[0], - CacheHistogram: v[1:11], - NotFound: v[11], - }, nil -} - -func parseNetwork(v []uint64) (Network, error) { - if len(v) != 4 { - return Network{}, fmt.Errorf("invalid Network line %q", v) - } - - return Network{ - NetCount: v[0], - UDPCount: v[1], - TCPCount: v[2], - TCPConnect: v[3], - }, nil -} - -func parseServerRPC(v []uint64) (ServerRPC, error) { - if len(v) != 5 { - return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ServerRPC{ - RPCCount: v[0], - BadCnt: v[1], - BadFmt: v[2], - BadAuth: v[3], - BadcInt: v[4], - }, nil -} - -func parseClientRPC(v []uint64) (ClientRPC, error) { - if len(v) != 3 { - return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ClientRPC{ - RPCCount: v[0], - Retransmissions: v[1], - AuthRefreshes: v[2], - }, nil -} - -func parseV2Stats(v []uint64) (V2Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 18 { - return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) - } - - return V2Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Root: v[4], - Lookup: v[5], - ReadLink: v[6], - Read: v[7], - WrCache: v[8], - Write: v[9], - Create: v[10], - Remove: v[11], - Rename: v[12], - Link: v[13], - SymLink: v[14], - 
MkDir: v[15], - RmDir: v[16], - ReadDir: v[17], - FsStat: v[18], - }, nil -} - -func parseV3Stats(v []uint64) (V3Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 22 { - return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) - } - - return V3Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Lookup: v[4], - Access: v[5], - ReadLink: v[6], - Read: v[7], - Write: v[8], - Create: v[9], - MkDir: v[10], - SymLink: v[11], - MkNod: v[12], - Remove: v[13], - RmDir: v[14], - Rename: v[15], - Link: v[16], - ReadDir: v[17], - ReadDirPlus: v[18], - FsStat: v[19], - FsInfo: v[20], - PathConf: v[21], - Commit: v[22], - }, nil -} - -func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values { - return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) - } - - // This function currently supports mapping 59 NFS v4 client stats. Older - // kernels may emit fewer stats, so we must detect this and pad out the - // values to match the expected slice size. - if values < 59 { - newValues := make([]uint64, 60) - copy(newValues, v) - v = newValues - } - - return ClientV4Stats{ - Null: v[1], - Read: v[2], - Write: v[3], - Commit: v[4], - Open: v[5], - OpenConfirm: v[6], - OpenNoattr: v[7], - OpenDowngrade: v[8], - Close: v[9], - Setattr: v[10], - FsInfo: v[11], - Renew: v[12], - SetClientID: v[13], - SetClientIDConfirm: v[14], - Lock: v[15], - Lockt: v[16], - Locku: v[17], - Access: v[18], - Getattr: v[19], - Lookup: v[20], - LookupRoot: v[21], - Remove: v[22], - Rename: v[23], - Link: v[24], - Symlink: v[25], - Create: v[26], - Pathconf: v[27], - StatFs: v[28], - ReadLink: v[29], - ReadDir: v[30], - ServerCaps: v[31], - DelegReturn: v[32], - GetACL: v[33], - SetACL: v[34], - FsLocations: v[35], - ReleaseLockowner: v[36], - Secinfo: v[37], - FsidPresent: v[38], - ExchangeID: v[39], - CreateSession: v[40], - DestroySession: v[41], - Sequence: v[42], - GetLeaseTime: v[43], - ReclaimComplete: v[44], - LayoutGet: v[45], - GetDeviceInfo: v[46], - LayoutCommit: v[47], - LayoutReturn: v[48], - SecinfoNoName: v[49], - TestStateID: v[50], - FreeStateID: v[51], - GetDeviceList: v[52], - BindConnToSession: v[53], - DestroyClientID: v[54], - Seek: v[55], - Allocate: v[56], - DeAllocate: v[57], - LayoutStats: v[58], - Clone: v[59], - }, nil -} - -func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 2 { - return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) - } - - return ServerV4Stats{ - Null: v[1], - Compound: v[2], - }, nil -} - -func parseV4Ops(v []uint64) (V4Ops, error) { - values := int(v[0]) - if len(v[1:]) != values || values < 39 { - return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) - } - - stats := V4Ops{ - Op0Unused: v[1], - Op1Unused: v[2], - Op2Future: v[3], - Access: v[4], - Close: v[5], - Commit: v[6], - Create: v[7], - DelegPurge: v[8], - DelegReturn: v[9], - GetAttr: v[10], - GetFH: v[11], - Link: v[12], - Lock: v[13], - Lockt: v[14], - Locku: v[15], - Lookup: v[16], - LookupRoot: v[17], - Nverify: v[18], - Open: v[19], - OpenAttr: v[20], - OpenConfirm: v[21], - OpenDgrd: v[22], - PutFH: v[23], - PutPubFH: v[24], - PutRootFH: v[25], - Read: v[26], - ReadDir: v[27], - ReadLink: v[28], - Remove: v[29], - Rename: v[30], - Renew: v[31], - RestoreFH: v[32], - SaveFH: v[33], - SecInfo: v[34], - SetAttr: v[35], - Verify: v[36], - Write: v[37], - RelLockOwner: v[38], - } - - return stats, nil -} diff --git 
a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go deleted file mode 100644 index c0d3a5ad9b..0000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs -func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { - stats := &ClientRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFS metric line %q", line) - } - - values, err := util.ParseUint64s(parts[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing NFS metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ClientRPC, err = parseClientRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ClientV4Stats, err = parseClientV4Stats(values) - default: - return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFS file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go deleted file mode 100644 index 57bb4a3585..0000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd -func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { - stats := &ServerRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFSd metric line %q", line) - } - label := parts[0] - - var values []uint64 - var err error - if label == "th" { - if len(parts) < 3 { - return nil, fmt.Errorf("invalid NFSd th metric line %q", line) - } - values, err = util.ParseUint64s(parts[1:3]) - } else { - values, err = util.ParseUint64s(parts[1:]) - } - if err != nil { - return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "rc": - stats.ReplyCache, err = parseReplyCache(values) - case "fh": - stats.FileHandles, err = parseFileHandles(values) - case "io": - stats.InputOutput, err = parseInputOutput(values) - case "th": - stats.Threads, err = parseThreads(values) - case "ra": - stats.ReadAheadCache, err = parseReadAheadCache(values) - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ServerRPC, err = parseServerRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ServerV4Stats, err = parseServerV4Stats(values) - case "proc4ops": - stats.V4Ops, err = parseV4Ops(values) - default: - return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFSd file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 7cf5b8acf9..330e472c70 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -20,6 +20,9 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Proc provides information about a running process. @@ -27,7 +30,7 @@ type Proc struct { // The process ID. PID int - fs FS + fs fs.FS } // Procs represents a list of Proc structs. @@ -52,7 +55,7 @@ func NewProc(pid int) (Proc, error) { if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // AllProcs returns a list of all currently available processes under /proc. @@ -66,28 +69,35 @@ func AllProcs() (Procs, error) { // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) + p, err := os.Readlink(fs.proc.Path("self")) if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // NewProc returns a process for the given pid. +// +// Deprecated: use fs.Proc() instead func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. 
+func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs}, nil + return Proc{PID: pid, fs: fs.proc}, nil } // AllProcs returns a list of all currently available processes. func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) + d, err := os.Open(fs.proc.Path()) if err != nil { return Procs{}, err } @@ -104,7 +114,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs}) + p = append(p, Proc{PID: int(pid), fs: fs.proc}) } return p, nil @@ -112,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) { // CmdLine returns the command line of a process. func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("cmdline")) if err != nil { return nil, err } @@ -132,13 +136,7 @@ func (p Proc) CmdLine() ([]string, error) { // Comm returns the command name of a process. func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("comm")) if err != nil { return "", err } @@ -156,6 +154,26 @@ func (p Proc) Executable() (string, error) { return exe, err } +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + // FileDescriptors returns the currently open file descriptors of a process. func (p Proc) FileDescriptors() ([]uintptr, error) { names, err := p.fileDescriptors() @@ -218,6 +236,18 @@ func (p Proc) MountStats() ([]*Mount, error) { return parseMountStats(f) } +// MountInfo retrieves mount information for mount points in a +// process's namespace. +// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. +func (p Proc) MountInfo() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(p.path("mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + func (p Proc) fileDescriptors() ([]string, error) { d, err := os.Open(p.path("fd")) if err != nil { @@ -236,3 +266,33 @@ func (p Proc) fileDescriptors() ([]string, error) { func (p Proc) path(pa ...string) string { return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. 
+func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := ioutil.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 0000000000..6134b3580c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Environ reads process environments from /proc//environ +func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + data, err := util.ReadFileNoStat(p.path("environ")) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 0000000000..0c9c402850 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,133 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +// Regexp variables +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) + rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) +) + +// ProcFDInfo contains represents file descriptor information. +type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. 
+func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. +func parseInotifyInfo(line string) (*InotifyInfo, error) { + m := rInotifyParts.FindStringSubmatch(line) + if len(m) >= 4 { + var mask string + if len(m) == 5 { + mask = m[4] + } + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: mask, + } + return i, nil + } + return nil, errors.New("invalid inode entry: " + line) +} + +// ProcFDInfos represents a list of ProcFDInfo structs. +type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches +func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 0251c83bfe..776f349717 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -15,8 +15,8 @@ package procfs import ( "fmt" - "io/ioutil" - "os" + + "github.com/prometheus/procfs/internal/util" ) // ProcIO models the content of /proc//io. @@ -39,17 +39,11 @@ type ProcIO struct { CancelledWriteBytes int64 } -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { +// IO creates a new ProcIO instance from a given Proc instance. +func (p Proc) IO() (ProcIO, error) { pio := ProcIO{} - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("io")) if err != nil { return pio, err } diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index f04ba6fda8..91ee24df8b 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -78,7 +78,14 @@ var ( ) // NewLimits returns the current soft limits of the process. +// +// Deprecated: use p.Limits() instead func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. 
+func (p Proc) Limits() (ProcLimits, error) { f, err := os.Open(p.path("limits")) if err != nil { return ProcLimits{}, err diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go new file mode 100644 index 0000000000..28d5c6eb1d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -0,0 +1,208 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +type ProcMapPermissions struct { + // mapping has the [R]ead flag set + Read bool + // mapping has the [W]rite flag set + Write bool + // mapping has the [X]ecutable flag set + Execute bool + // mapping has the [S]hared flag set + Shared bool + // mapping is marked as [P]rivate (copy on write) + Private bool +} + +// ProcMap contains the process memory-mappings of the process, +// read from /proc/[pid]/maps +type ProcMap struct { + // The start address of current mapping. + StartAddr uintptr + // The end address of the current mapping + EndAddr uintptr + // The permissions for this mapping + Perms *ProcMapPermissions + // The current offset into the file/fd (e.g., shared libs) + Offset int64 + // Device owner of this mapping (major:minor) in Mkdev format. + Dev uint64 + // The inode of the device above + Inode uint64 + // The file or psuedofile (or empty==anonymous) + Pathname string +} + +// parseDevice parses the device token of a line and converts it to a dev_t +// (mkdev) like structure. +func parseDevice(s string) (uint64, error) { + toks := strings.Split(s, ":") + if len(toks) < 2 { + return 0, fmt.Errorf("unexpected number of fields") + } + + major, err := strconv.ParseUint(toks[0], 16, 0) + if err != nil { + return 0, err + } + + minor, err := strconv.ParseUint(toks[1], 16, 0) + if err != nil { + return 0, err + } + + return unix.Mkdev(uint32(major), uint32(minor)), nil +} + +// parseAddress just converts a hex-string to a uintptr +func parseAddress(s string) (uintptr, error) { + a, err := strconv.ParseUint(s, 16, 0) + if err != nil { + return 0, err + } + + return uintptr(a), nil +} + +// parseAddresses parses the start-end address +func parseAddresses(s string) (uintptr, uintptr, error) { + toks := strings.Split(s, "-") + if len(toks) < 2 { + return 0, 0, fmt.Errorf("invalid address") + } + + saddr, err := parseAddress(toks[0]) + if err != nil { + return 0, 0, err + } + + eaddr, err := parseAddress(toks[1]) + if err != nil { + return 0, 0, err + } + + return saddr, eaddr, nil +} + +// parsePermissions parses a token and returns any that are set. 
+func parsePermissions(s string) (*ProcMapPermissions, error) { + if len(s) < 4 { + return nil, fmt.Errorf("invalid permissions token") + } + + perms := ProcMapPermissions{} + for _, ch := range s { + switch ch { + case 'r': + perms.Read = true + case 'w': + perms.Write = true + case 'x': + perms.Execute = true + case 'p': + perms.Private = true + case 's': + perms.Shared = true + } + } + + return &perms, nil +} + +// parseProcMap will attempt to parse a single line within a proc/[pid]/maps +// buffer. +func parseProcMap(text string) (*ProcMap, error) { + fields := strings.Fields(text) + if len(fields) < 5 { + return nil, fmt.Errorf("truncated procmap entry") + } + + saddr, eaddr, err := parseAddresses(fields[0]) + if err != nil { + return nil, err + } + + perms, err := parsePermissions(fields[1]) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(fields[2], 16, 0) + if err != nil { + return nil, err + } + + device, err := parseDevice(fields[3]) + if err != nil { + return nil, err + } + + inode, err := strconv.ParseUint(fields[4], 10, 0) + if err != nil { + return nil, err + } + + pathname := "" + + if len(fields) >= 5 { + pathname = strings.Join(fields[5:], " ") + } + + return &ProcMap{ + StartAddr: saddr, + EndAddr: eaddr, + Perms: perms, + Offset: offset, + Dev: device, + Inode: inode, + Pathname: pathname, + }, nil +} + +// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the +// process. +func (p Proc) ProcMaps() ([]*ProcMap, error) { + file, err := os.Open(p.path("maps")) + if err != nil { + return nil, err + } + defer file.Close() + + maps := []*ProcMap{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + m, err := parseProcMap(scan.Text()) + if err != nil { + return nil, err + } + + maps = append(maps, m) + } + + return maps, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index d06c26ebad..c66740ff74 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -29,9 +29,9 @@ type Namespace struct { // Namespaces contains all of the namespaces that the process is contained in. type Namespaces map[string]Namespace -// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the +// Namespaces reads from /proc//ns/* to get the namespaces of which the // process is a member. -func (p Proc) NewNamespaces() (Namespaces, error) { +func (p Proc) Namespaces() (Namespaces, error) { d, err := os.Open(p.path("ns")) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 0000000000..0d7bee54ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by /proc/pressure/* +// The Avg entries are averages over n seconds, as a percentage +// The Total line is in microseconds +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// Some indicates the share of time in which at least some tasks are stalled +// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". +func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) + } + + return parsePSIStats(resource, bytes.NewReader(data)) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 3cf2a9f18f..4517d2e9dd 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -16,8 +16,10 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" "os" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call @@ -95,22 +97,23 @@ type ProcStat struct { // in clock ticks. Starttime uint64 // Virtual memory size in bytes. - VSize int + VSize uint // Resident set size in pages. 
RSS int - fs FS + proc fs.FS } // NewStat returns the current status information of the process. +// +// Deprecated: use p.Stat() instead func (p Proc) NewStat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - defer f.Close() + return p.Stat() +} - data, err := ioutil.ReadAll(f) +// Stat returns the current status information of the process. +func (p Proc) Stat() (ProcStat, error) { + data, err := util.ReadFileNoStat(p.path("stat")) if err != nil { return ProcStat{}, err } @@ -118,7 +121,7 @@ func (p Proc) NewStat() (ProcStat, error) { var ( ignore int - s = ProcStat{PID: p.PID, fs: p.fs} + s = ProcStat{PID: p.PID, proc: p.fs} l = bytes.Index(data, []byte("(")) r = bytes.LastIndex(data, []byte(")")) ) @@ -164,7 +167,7 @@ func (p Proc) NewStat() (ProcStat, error) { } // VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() int { +func (s ProcStat) VirtualMemory() uint { return s.VSize } @@ -175,7 +178,8 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() + fs := FS{proc: s.proc} + stat, err := fs.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 0000000000..c58346d910 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,166 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Thread group ID. + TGID int + + // Peak virtual memory size. + VmPeak uint64 // nolint:golint + // Virtual memory size. + VmSize uint64 // nolint:golint + // Locked memory size. + VmLck uint64 // nolint:golint + // Pinned memory size. + VmPin uint64 // nolint:golint + // Peak resident set size. + VmHWM uint64 // nolint:golint + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 // nolint:golint + // Size of resident anonymous memory. + RssAnon uint64 // nolint:golint + // Size of resident file mappings. + RssFile uint64 // nolint:golint + // Size of resident shared memory. + RssShmem uint64 // nolint:golint + // Size of data segments. + VmData uint64 // nolint:golint + // Size of stack segments. + VmStk uint64 // nolint:golint + // Size of text segments. + VmExe uint64 // nolint:golint + // Shared library code size. + VmLib uint64 // nolint:golint + // Page table entries size. + VmPTE uint64 // nolint:golint + // Size of second-level page tables. + VmPMD uint64 // nolint:golint + // Swapped-out virtual memory size by anonymous private. 
+ VmSwap uint64 // nolint:golint + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. + VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) + UIDs [4]string +} + +// NewStatus returns the current status information of the process. +func (p Proc) NewStatus() (ProcStatus, error) { + data, err := util.ReadFileNoStat(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Tgid": + s.TGID = int(vUint) + case "Name": + s.Name = vString + case "Uid": + copy(s.UIDs[:], strings.Split(vString, "\t")) + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 0000000000..a4c4089ac5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,118 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. A fix to the documentation is pending. See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from /proc//schedstat +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from /proc/schedstat +func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return + } + + err = errors.New("could not parse schedstat") + return +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 61eb6b0e3c..b2a6fc994c 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -15,11 +15,14 @@ package procfs import ( "bufio" + "bytes" "fmt" "io" - "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // CPUStat shows how much time the cpu spend in various stages. @@ -78,16 +81,6 @@ type Stat struct { SoftIRQ SoftIRQStat } -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - // Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). 
func parseCPUStat(line string) (CPUStat, int64, error) { cpuStat := CPUStat{} @@ -149,19 +142,38 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { return softIRQStat, total, nil } -// NewStat returns an information about current kernel/system statistics. -func (fs FS) NewStat() (Stat, error) { - // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt - - f, err := os.Open(fs.Path("stat")) +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func (fs FS) NewStat() (Stat, error) { + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) if err != nil { return Stat{}, err } - defer f.Close() stat := Stat{} - scanner := bufio.NewScanner(f) + scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -225,7 +237,7 @@ func (fs FS) NewStat() (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 0000000000..15edc2212b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. 
+func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + } + + return swap, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 0000000000..cb13891414 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// The VM interface is described at +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// Each setting is exposed as a single file. 
+// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array +// and numa_zonelist_order (deprecated) which is a string +type VM struct { + AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes + BlockDump *int64 // /proc/sys/vm/block_dump + CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed + DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes + DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio + DirtyBytes *int64 // /proc/sys/vm/dirty_bytes + DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs + DirtyRatio *int64 // /proc/sys/vm/dirty_ratio + DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds + DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs + DropCaches *int64 // /proc/sys/vm/drop_caches + ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold + HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group + LaptopMode *int64 // /proc/sys/vm/laptop_mode + LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout + LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio + MaxMapCount *int64 // /proc/sys/vm/max_map_count + MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill + MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery + MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes + MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio + MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio + MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr + NrHugepages *int64 // /proc/sys/vm/nr_hugepages + NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy + NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages + NumaStat *int64 // /proc/sys/vm/numa_stat + NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order + OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks + OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task + OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes + OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory + OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio + PageCluster *int64 // /proc/sys/vm/page-cluster + PanicOnOom *int64 // /proc/sys/vm/panic_on_oom + PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction + StatInterval *int64 // /proc/sys/vm/stat_interval + Swappiness *int64 // /proc/sys/vm/swappiness + UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes + VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure + WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor + WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor + ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode +} + +// VM reads the VM statistics from the specified `proc` filesystem. 
+func (fs FS) VM() (*VM, error) { + path := fs.proc.Path("sys/vm") + file, err := os.Stat(path) + if err != nil { + return nil, err + } + if !file.Mode().IsDir() { + return nil, fmt.Errorf("%s is not a directory", path) + } + + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var vm VM + for _, f := range files { + if f.IsDir() { + continue + } + + name := filepath.Join(path, f.Name()) + // ignore errors on read, as there are some write only + // in /proc/sys/vm + value, err := util.SysReadFile(name) + if err != nil { + continue + } + vp := util.NewValueParser(value) + + switch f.Name() { + case "admin_reserve_kbytes": + vm.AdminReserveKbytes = vp.PInt64() + case "block_dump": + vm.BlockDump = vp.PInt64() + case "compact_unevictable_allowed": + vm.CompactUnevictableAllowed = vp.PInt64() + case "dirty_background_bytes": + vm.DirtyBackgroundBytes = vp.PInt64() + case "dirty_background_ratio": + vm.DirtyBackgroundRatio = vp.PInt64() + case "dirty_bytes": + vm.DirtyBytes = vp.PInt64() + case "dirty_expire_centisecs": + vm.DirtyExpireCentisecs = vp.PInt64() + case "dirty_ratio": + vm.DirtyRatio = vp.PInt64() + case "dirtytime_expire_seconds": + vm.DirtytimeExpireSeconds = vp.PInt64() + case "dirty_writeback_centisecs": + vm.DirtyWritebackCentisecs = vp.PInt64() + case "drop_caches": + vm.DropCaches = vp.PInt64() + case "extfrag_threshold": + vm.ExtfragThreshold = vp.PInt64() + case "hugetlb_shm_group": + vm.HugetlbShmGroup = vp.PInt64() + case "laptop_mode": + vm.LaptopMode = vp.PInt64() + case "legacy_va_layout": + vm.LegacyVaLayout = vp.PInt64() + case "lowmem_reserve_ratio": + stringSlice := strings.Fields(value) + pint64Slice := make([]*int64, 0, len(stringSlice)) + for _, value := range stringSlice { + vp := util.NewValueParser(value) + pint64Slice = append(pint64Slice, vp.PInt64()) + } + vm.LowmemReserveRatio = pint64Slice + case "max_map_count": + vm.MaxMapCount = vp.PInt64() + case "memory_failure_early_kill": + vm.MemoryFailureEarlyKill = vp.PInt64() + case "memory_failure_recovery": + vm.MemoryFailureRecovery = vp.PInt64() + case "min_free_kbytes": + vm.MinFreeKbytes = vp.PInt64() + case "min_slab_ratio": + vm.MinSlabRatio = vp.PInt64() + case "min_unmapped_ratio": + vm.MinUnmappedRatio = vp.PInt64() + case "mmap_min_addr": + vm.MmapMinAddr = vp.PInt64() + case "nr_hugepages": + vm.NrHugepages = vp.PInt64() + case "nr_hugepages_mempolicy": + vm.NrHugepagesMempolicy = vp.PInt64() + case "nr_overcommit_hugepages": + vm.NrOvercommitHugepages = vp.PInt64() + case "numa_stat": + vm.NumaStat = vp.PInt64() + case "numa_zonelist_order": + vm.NumaZonelistOrder = value + case "oom_dump_tasks": + vm.OomDumpTasks = vp.PInt64() + case "oom_kill_allocating_task": + vm.OomKillAllocatingTask = vp.PInt64() + case "overcommit_kbytes": + vm.OvercommitKbytes = vp.PInt64() + case "overcommit_memory": + vm.OvercommitMemory = vp.PInt64() + case "overcommit_ratio": + vm.OvercommitRatio = vp.PInt64() + case "page-cluster": + vm.PageCluster = vp.PInt64() + case "panic_on_oom": + vm.PanicOnOom = vp.PInt64() + case "percpu_pagelist_fraction": + vm.PercpuPagelistFraction = vp.PInt64() + case "stat_interval": + vm.StatInterval = vp.PInt64() + case "swappiness": + vm.Swappiness = vp.PInt64() + case "user_reserve_kbytes": + vm.UserReserveKbytes = vp.PInt64() + case "vfs_cache_pressure": + vm.VfsCachePressure = vp.PInt64() + case "watermark_boost_factor": + vm.WatermarkBoostFactor = vp.PInt64() + case "watermark_scale_factor": + vm.WatermarkScaleFactor = vp.PInt64() + case 
"zone_reclaim_mode": + vm.ZoneReclaimMode = vp.PInt64() + } + if err := vp.Err(); err != nil { + return nil, err + } + } + + return &vm, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go index ffe9df50d6..30aa417d53 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) { // NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.Path("net/xfrm_stat")) + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) if err != nil { return XfrmStat{}, err } @@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { if len(fields) != 2 { return XfrmStat{}, fmt.Errorf( - "couldnt parse %s line %s", file.Name(), s.Text()) + "couldn't parse %s line %s", file.Name(), s.Text()) } name := fields[0] diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go deleted file mode 100644 index 2bc0ef3427..0000000000 --- a/vendor/github.com/prometheus/procfs/xfs/parse.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package xfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseStats parses a Stats from an input io.Reader, using the format -// found in /proc/fs/xfs/stat. -func ParseStats(r io.Reader) (*Stats, error) { - const ( - // Fields parsed into stats structures. - fieldExtentAlloc = "extent_alloc" - fieldAbt = "abt" - fieldBlkMap = "blk_map" - fieldBmbt = "bmbt" - fieldDir = "dir" - fieldTrans = "trans" - fieldIg = "ig" - fieldLog = "log" - fieldRw = "rw" - fieldAttr = "attr" - fieldIcluster = "icluster" - fieldVnodes = "vnodes" - fieldBuf = "buf" - fieldXpc = "xpc" - - // Unimplemented at this time due to lack of documentation. - fieldPushAil = "push_ail" - fieldXstrat = "xstrat" - fieldAbtb2 = "abtb2" - fieldAbtc2 = "abtc2" - fieldBmbt2 = "bmbt2" - fieldIbt2 = "ibt2" - fieldFibt2 = "fibt2" - fieldQm = "qm" - fieldDebug = "debug" - ) - - var xfss Stats - - s := bufio.NewScanner(r) - for s.Scan() { - // Expect at least a string label and a single integer value, ex: - // - abt 0 - // - rw 1 2 - ss := strings.Fields(string(s.Bytes())) - if len(ss) < 2 { - continue - } - label := ss[0] - - // Extended precision counters are uint64 values. - if label == fieldXpc { - us, err := util.ParseUint64s(ss[1:]) - if err != nil { - return nil, err - } - - xfss.ExtendedPrecision, err = extendedPrecisionStats(us) - if err != nil { - return nil, err - } - - continue - } - - // All other counters are uint32 values. 
- us, err := util.ParseUint32s(ss[1:]) - if err != nil { - return nil, err - } - - switch label { - case fieldExtentAlloc: - xfss.ExtentAllocation, err = extentAllocationStats(us) - case fieldAbt: - xfss.AllocationBTree, err = btreeStats(us) - case fieldBlkMap: - xfss.BlockMapping, err = blockMappingStats(us) - case fieldBmbt: - xfss.BlockMapBTree, err = btreeStats(us) - case fieldDir: - xfss.DirectoryOperation, err = directoryOperationStats(us) - case fieldTrans: - xfss.Transaction, err = transactionStats(us) - case fieldIg: - xfss.InodeOperation, err = inodeOperationStats(us) - case fieldLog: - xfss.LogOperation, err = logOperationStats(us) - case fieldRw: - xfss.ReadWrite, err = readWriteStats(us) - case fieldAttr: - xfss.AttributeOperation, err = attributeOperationStats(us) - case fieldIcluster: - xfss.InodeClustering, err = inodeClusteringStats(us) - case fieldVnodes: - xfss.Vnode, err = vnodeStats(us) - case fieldBuf: - xfss.Buffer, err = bufferStats(us) - } - if err != nil { - return nil, err - } - } - - return &xfss, s.Err() -} - -// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. -func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { - if l := len(us); l != 4 { - return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) - } - - return ExtentAllocationStats{ - ExtentsAllocated: us[0], - BlocksAllocated: us[1], - ExtentsFreed: us[2], - BlocksFreed: us[3], - }, nil -} - -// btreeStats builds a BTreeStats from a slice of uint32s. -func btreeStats(us []uint32) (BTreeStats, error) { - if l := len(us); l != 4 { - return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) - } - - return BTreeStats{ - Lookups: us[0], - Compares: us[1], - RecordsInserted: us[2], - RecordsDeleted: us[3], - }, nil -} - -// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. -func blockMappingStats(us []uint32) (BlockMappingStats, error) { - if l := len(us); l != 7 { - return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) - } - - return BlockMappingStats{ - Reads: us[0], - Writes: us[1], - Unmaps: us[2], - ExtentListInsertions: us[3], - ExtentListDeletions: us[4], - ExtentListLookups: us[5], - ExtentListCompares: us[6], - }, nil -} - -// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. -func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { - if l := len(us); l != 4 { - return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) - } - - return DirectoryOperationStats{ - Lookups: us[0], - Creates: us[1], - Removes: us[2], - Getdents: us[3], - }, nil -} - -// TransactionStats builds a TransactionStats from a slice of uint32s. -func transactionStats(us []uint32) (TransactionStats, error) { - if l := len(us); l != 3 { - return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) - } - - return TransactionStats{ - Sync: us[0], - Async: us[1], - Empty: us[2], - }, nil -} - -// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. 
-func inodeOperationStats(us []uint32) (InodeOperationStats, error) { - if l := len(us); l != 7 { - return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) - } - - return InodeOperationStats{ - Attempts: us[0], - Found: us[1], - Recycle: us[2], - Missed: us[3], - Duplicate: us[4], - Reclaims: us[5], - AttributeChange: us[6], - }, nil -} - -// LogOperationStats builds a LogOperationStats from a slice of uint32s. -func logOperationStats(us []uint32) (LogOperationStats, error) { - if l := len(us); l != 5 { - return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) - } - - return LogOperationStats{ - Writes: us[0], - Blocks: us[1], - NoInternalBuffers: us[2], - Force: us[3], - ForceSleep: us[4], - }, nil -} - -// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. -func readWriteStats(us []uint32) (ReadWriteStats, error) { - if l := len(us); l != 2 { - return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) - } - - return ReadWriteStats{ - Read: us[0], - Write: us[1], - }, nil -} - -// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. -func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { - if l := len(us); l != 4 { - return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) - } - - return AttributeOperationStats{ - Get: us[0], - Set: us[1], - Remove: us[2], - List: us[3], - }, nil -} - -// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. -func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { - if l := len(us); l != 3 { - return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) - } - - return InodeClusteringStats{ - Iflush: us[0], - Flush: us[1], - FlushInode: us[2], - }, nil -} - -// VnodeStats builds a VnodeStats from a slice of uint32s. -func vnodeStats(us []uint32) (VnodeStats, error) { - // The attribute "Free" appears to not be available on older XFS - // stats versions. Therefore, 7 or 8 elements may appear in - // this slice. - l := len(us) - if l != 7 && l != 8 { - return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) - } - - s := VnodeStats{ - Active: us[0], - Allocate: us[1], - Get: us[2], - Hold: us[3], - Release: us[4], - Reclaim: us[5], - Remove: us[6], - } - - // Skip adding free, unless it is present. The zero value will - // be used in place of an actual count. - if l == 7 { - return s, nil - } - - s.Free = us[7] - return s, nil -} - -// BufferStats builds a BufferStats from a slice of uint32s. -func bufferStats(us []uint32) (BufferStats, error) { - if l := len(us); l != 9 { - return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) - } - - return BufferStats{ - Get: us[0], - Create: us[1], - GetLocked: us[2], - GetLockedWaited: us[3], - BusyLocked: us[4], - MissLocked: us[5], - PageRetries: us[6], - PageFound: us[7], - GetRead: us[8], - }, nil -} - -// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. 
-func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { - if l := len(us); l != 3 { - return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) - } - - return ExtendedPrecisionStats{ - FlushBytes: us[0], - WriteBytes: us[1], - ReadBytes: us[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go deleted file mode 100644 index d86794b7ca..0000000000 --- a/vendor/github.com/prometheus/procfs/xfs/xfs.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package xfs provides access to statistics exposed by the XFS filesystem. -package xfs - -// Stats contains XFS filesystem runtime statistics, parsed from -// /proc/fs/xfs/stat. -// -// The names and meanings of each statistic were taken from -// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux -// kernel source. Most counters are uint32s (same data types used in -// xfs_stats.h), but some of the "extended precision stats" are uint64s. -type Stats struct { - // The name of the filesystem used to source these statistics. - // If empty, this indicates aggregated statistics for all XFS - // filesystems on the host. - Name string - - ExtentAllocation ExtentAllocationStats - AllocationBTree BTreeStats - BlockMapping BlockMappingStats - BlockMapBTree BTreeStats - DirectoryOperation DirectoryOperationStats - Transaction TransactionStats - InodeOperation InodeOperationStats - LogOperation LogOperationStats - ReadWrite ReadWriteStats - AttributeOperation AttributeOperationStats - InodeClustering InodeClusteringStats - Vnode VnodeStats - Buffer BufferStats - ExtendedPrecision ExtendedPrecisionStats -} - -// ExtentAllocationStats contains statistics regarding XFS extent allocations. -type ExtentAllocationStats struct { - ExtentsAllocated uint32 - BlocksAllocated uint32 - ExtentsFreed uint32 - BlocksFreed uint32 -} - -// BTreeStats contains statistics regarding an XFS internal B-tree. -type BTreeStats struct { - Lookups uint32 - Compares uint32 - RecordsInserted uint32 - RecordsDeleted uint32 -} - -// BlockMappingStats contains statistics regarding XFS block maps. -type BlockMappingStats struct { - Reads uint32 - Writes uint32 - Unmaps uint32 - ExtentListInsertions uint32 - ExtentListDeletions uint32 - ExtentListLookups uint32 - ExtentListCompares uint32 -} - -// DirectoryOperationStats contains statistics regarding XFS directory entries. -type DirectoryOperationStats struct { - Lookups uint32 - Creates uint32 - Removes uint32 - Getdents uint32 -} - -// TransactionStats contains statistics regarding XFS metadata transactions. -type TransactionStats struct { - Sync uint32 - Async uint32 - Empty uint32 -} - -// InodeOperationStats contains statistics regarding XFS inode operations. 
-type InodeOperationStats struct { - Attempts uint32 - Found uint32 - Recycle uint32 - Missed uint32 - Duplicate uint32 - Reclaims uint32 - AttributeChange uint32 -} - -// LogOperationStats contains statistics regarding the XFS log buffer. -type LogOperationStats struct { - Writes uint32 - Blocks uint32 - NoInternalBuffers uint32 - Force uint32 - ForceSleep uint32 -} - -// ReadWriteStats contains statistics regarding the number of read and write -// system calls for XFS filesystems. -type ReadWriteStats struct { - Read uint32 - Write uint32 -} - -// AttributeOperationStats contains statistics regarding manipulation of -// XFS extended file attributes. -type AttributeOperationStats struct { - Get uint32 - Set uint32 - Remove uint32 - List uint32 -} - -// InodeClusteringStats contains statistics regarding XFS inode clustering -// operations. -type InodeClusteringStats struct { - Iflush uint32 - Flush uint32 - FlushInode uint32 -} - -// VnodeStats contains statistics regarding XFS vnode operations. -type VnodeStats struct { - Active uint32 - Allocate uint32 - Get uint32 - Hold uint32 - Release uint32 - Reclaim uint32 - Remove uint32 - Free uint32 -} - -// BufferStats contains statistics regarding XFS read/write I/O buffers. -type BufferStats struct { - Get uint32 - Create uint32 - GetLocked uint32 - GetLockedWaited uint32 - BusyLocked uint32 - MissLocked uint32 - PageRetries uint32 - PageFound uint32 - GetRead uint32 -} - -// ExtendedPrecisionStats contains high precision counters used to track the -// total number of bytes read, written, or flushed, during XFS operations. -type ExtendedPrecisionStats struct { - FlushBytes uint64 - WriteBytes uint64 - ReadBytes uint64 -} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go new file mode 100644 index 0000000000..e941503d5c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Zoneinfo holds info parsed from /proc/zoneinfo. 
+type Zoneinfo struct { + Node string + Zone string + NrFreePages *int64 + Min *int64 + Low *int64 + High *int64 + Scanned *int64 + Spanned *int64 + Present *int64 + Managed *int64 + NrActiveAnon *int64 + NrInactiveAnon *int64 + NrIsolatedAnon *int64 + NrAnonPages *int64 + NrAnonTransparentHugepages *int64 + NrActiveFile *int64 + NrInactiveFile *int64 + NrIsolatedFile *int64 + NrFilePages *int64 + NrSlabReclaimable *int64 + NrSlabUnreclaimable *int64 + NrMlockStack *int64 + NrKernelStack *int64 + NrMapped *int64 + NrDirty *int64 + NrWriteback *int64 + NrUnevictable *int64 + NrShmem *int64 + NrDirtied *int64 + NrWritten *int64 + NumaHit *int64 + NumaMiss *int64 + NumaForeign *int64 + NumaInterleave *int64 + NumaLocal *int64 + NumaOther *int64 + Protection []*int64 +} + +var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) + +// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of +// structs containing the relevant info. More information available here: +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +func (fs FS) Zoneinfo() ([]Zoneinfo, error) { + data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + if err != nil { + return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + } + zoneinfo, err := parseZoneinfo(data) + if err != nil { + return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + } + return zoneinfo, nil +} + +func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { + + zoneinfo := []Zoneinfo{} + + zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) + for _, block := range zoneinfoBlocks { + var zoneinfoElement Zoneinfo + lines := strings.Split(string(block), "\n") + for _, line := range lines { + + if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { + zoneinfoElement.Node = nodeZone[1] + zoneinfoElement.Zone = nodeZone[2] + continue + } + if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { + zoneinfoElement.Zone = "" + continue + } + parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 2 { + continue + } + vp := util.NewValueParser(parts[1]) + switch parts[0] { + case "nr_free_pages": + zoneinfoElement.NrFreePages = vp.PInt64() + case "min": + zoneinfoElement.Min = vp.PInt64() + case "low": + zoneinfoElement.Low = vp.PInt64() + case "high": + zoneinfoElement.High = vp.PInt64() + case "scanned": + zoneinfoElement.Scanned = vp.PInt64() + case "spanned": + zoneinfoElement.Spanned = vp.PInt64() + case "present": + zoneinfoElement.Present = vp.PInt64() + case "managed": + zoneinfoElement.Managed = vp.PInt64() + case "nr_active_anon": + zoneinfoElement.NrActiveAnon = vp.PInt64() + case "nr_inactive_anon": + zoneinfoElement.NrInactiveAnon = vp.PInt64() + case "nr_isolated_anon": + zoneinfoElement.NrIsolatedAnon = vp.PInt64() + case "nr_anon_pages": + zoneinfoElement.NrAnonPages = vp.PInt64() + case "nr_anon_transparent_hugepages": + zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() + case "nr_active_file": + zoneinfoElement.NrActiveFile = vp.PInt64() + case "nr_inactive_file": + zoneinfoElement.NrInactiveFile = vp.PInt64() + case "nr_isolated_file": + zoneinfoElement.NrIsolatedFile = vp.PInt64() + case "nr_file_pages": + zoneinfoElement.NrFilePages = vp.PInt64() + case "nr_slab_reclaimable": + zoneinfoElement.NrSlabReclaimable = vp.PInt64() + case "nr_slab_unreclaimable": + zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() + case "nr_mlock_stack": + zoneinfoElement.NrMlockStack = vp.PInt64() + case "nr_kernel_stack": 
+ zoneinfoElement.NrKernelStack = vp.PInt64() + case "nr_mapped": + zoneinfoElement.NrMapped = vp.PInt64() + case "nr_dirty": + zoneinfoElement.NrDirty = vp.PInt64() + case "nr_writeback": + zoneinfoElement.NrWriteback = vp.PInt64() + case "nr_unevictable": + zoneinfoElement.NrUnevictable = vp.PInt64() + case "nr_shmem": + zoneinfoElement.NrShmem = vp.PInt64() + case "nr_dirtied": + zoneinfoElement.NrDirtied = vp.PInt64() + case "nr_written": + zoneinfoElement.NrWritten = vp.PInt64() + case "numa_hit": + zoneinfoElement.NumaHit = vp.PInt64() + case "numa_miss": + zoneinfoElement.NumaMiss = vp.PInt64() + case "numa_foreign": + zoneinfoElement.NumaForeign = vp.PInt64() + case "numa_interleave": + zoneinfoElement.NumaInterleave = vp.PInt64() + case "numa_local": + zoneinfoElement.NumaLocal = vp.PInt64() + case "numa_other": + zoneinfoElement.NumaOther = vp.PInt64() + case "protection:": + protectionParts := strings.Split(line, ":") + protectionValues := strings.Replace(protectionParts[1], "(", "", 1) + protectionValues = strings.Replace(protectionValues, ")", "", 1) + protectionValues = strings.TrimSpace(protectionValues) + protectionStringMap := strings.Split(protectionValues, ", ") + val, err := util.ParsePInt64s(protectionStringMap) + if err == nil { + zoneinfoElement.Protection = val + } + } + + } + + zoneinfo = append(zoneinfo, zoneinfoElement) + } + return zoneinfo, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go index 7c498e90d9..a2ecf5c325 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -42,10 +42,14 @@ type Cipher struct { // The last len bytes of buf are leftover key stream bytes from the previous // XORKeyStream invocation. The size of buf depends on how many blocks are - // computed at a time. + // computed at a time by xorKeyStreamBlocks. buf [bufSize]byte len int + // overflow is set when the counter overflowed, no more blocks can be + // generated, and the next XORKeyStream call should panic. + overflow bool + // The counter-independent results of the first round are cached after they // are computed the first time. precompDone bool @@ -89,6 +93,7 @@ func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { return nil, errors.New("chacha20: wrong nonce size") } + key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint c.key = [8]uint32{ binary.LittleEndian.Uint32(key[0:4]), binary.LittleEndian.Uint32(key[4:8]), @@ -139,15 +144,18 @@ func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { // SetCounter sets the Cipher counter. The next invocation of XORKeyStream will // behave as if (64 * counter) bytes had been encrypted so far. // -// To prevent accidental counter reuse, SetCounter panics if counter is -// less than the current value. +// To prevent accidental counter reuse, SetCounter panics if counter is less +// than the current value. +// +// Note that the execution time of XORKeyStream is not independent of the +// counter value. func (s *Cipher) SetCounter(counter uint32) { // Internally, s may buffer multiple blocks, which complicates this // implementation slightly. When checking whether the counter has rolled // back, we must use both s.counter and s.len to determine how many blocks // we have already output. 
outputCounter := s.counter - uint32(s.len)/blockSize - if counter < outputCounter { + if s.overflow || counter < outputCounter { panic("chacha20: SetCounter attempted to rollback counter") } @@ -196,34 +204,52 @@ func (s *Cipher) XORKeyStream(dst, src []byte) { dst[i] = src[i] ^ b } s.len -= len(keyStream) - src = src[len(keyStream):] - dst = dst[len(keyStream):] + dst, src = dst[len(keyStream):], src[len(keyStream):] + } + if len(src) == 0 { + return } - const blocksPerBuf = bufSize / blockSize - numBufs := (uint64(len(src)) + bufSize - 1) / bufSize - if uint64(s.counter)+numBufs*blocksPerBuf >= 1<<32 { + // If we'd need to let the counter overflow and keep generating output, + // panic immediately. If instead we'd only reach the last block, remember + // not to generate any more output after the buffer is drained. + numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize + if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { panic("chacha20: counter overflow") + } else if uint64(s.counter)+numBlocks == 1<<32 { + s.overflow = true } // xorKeyStreamBlocks implementations expect input lengths that are a // multiple of bufSize. Platform-specific ones process multiple blocks at a // time, so have bufSizes that are a multiple of blockSize. - rem := len(src) % bufSize - full := len(src) - rem - + full := len(src) - len(src)%bufSize if full > 0 { s.xorKeyStreamBlocks(dst[:full], src[:full]) } + dst, src = dst[full:], src[full:] + + // If using a multi-block xorKeyStreamBlocks would overflow, use the generic + // one that does one block at a time. + const blocksPerBuf = bufSize / blockSize + if uint64(s.counter)+blocksPerBuf > 1<<32 { + s.buf = [bufSize]byte{} + numBlocks := (len(src) + blockSize - 1) / blockSize + buf := s.buf[bufSize-numBlocks*blockSize:] + copy(buf, src) + s.xorKeyStreamBlocksGeneric(buf, buf) + s.len = len(buf) - copy(dst, buf) + return + } // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and // keep the leftover keystream for the next XORKeyStream invocation. - if rem > 0 { + if len(src) > 0 { s.buf = [bufSize]byte{} - copy(s.buf[:], src[full:]) + copy(s.buf[:], src) s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) - s.len = bufSize - copy(dst[full:], s.buf[:]) + s.len = bufSize - copy(dst, s.buf[:]) } } @@ -260,7 +286,9 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { s.precompDone = true } - for i := 0; i < len(src); i += blockSize { + // A condition of len(src) > 0 would be sufficient, but this also + // acts as a bounds check elimination hint. + for len(src) >= 64 && len(dst) >= 64 { // The remainder of the first column round. fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) @@ -285,49 +313,28 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) } - // Finally, add back the initial state to generate the key stream. - x0 += c0 - x1 += c1 - x2 += c2 - x3 += c3 - x4 += c4 - x5 += c5 - x6 += c6 - x7 += c7 - x8 += c8 - x9 += c9 - x10 += c10 - x11 += c11 - x12 += s.counter - x13 += c13 - x14 += c14 - x15 += c15 + // Add back the initial state to generate the key stream, then + // XOR the key stream with the source and write out the result. 
+ addXor(dst[0:4], src[0:4], x0, c0) + addXor(dst[4:8], src[4:8], x1, c1) + addXor(dst[8:12], src[8:12], x2, c2) + addXor(dst[12:16], src[12:16], x3, c3) + addXor(dst[16:20], src[16:20], x4, c4) + addXor(dst[20:24], src[20:24], x5, c5) + addXor(dst[24:28], src[24:28], x6, c6) + addXor(dst[28:32], src[28:32], x7, c7) + addXor(dst[32:36], src[32:36], x8, c8) + addXor(dst[36:40], src[36:40], x9, c9) + addXor(dst[40:44], src[40:44], x10, c10) + addXor(dst[44:48], src[44:48], x11, c11) + addXor(dst[48:52], src[48:52], x12, s.counter) + addXor(dst[52:56], src[52:56], x13, c13) + addXor(dst[56:60], src[56:60], x14, c14) + addXor(dst[60:64], src[60:64], x15, c15) s.counter += 1 - if s.counter == 0 { - panic("chacha20: internal error: counter overflow") - } - in, out := src[i:], dst[i:] - in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint - - // XOR the key stream with the source and write out the result. - xor(out[0:], in[0:], x0) - xor(out[4:], in[4:], x1) - xor(out[8:], in[8:], x2) - xor(out[12:], in[12:], x3) - xor(out[16:], in[16:], x4) - xor(out[20:], in[20:], x5) - xor(out[24:], in[24:], x6) - xor(out[28:], in[28:], x7) - xor(out[32:], in[32:], x8) - xor(out[36:], in[36:], x9) - xor(out[40:], in[40:], x10) - xor(out[44:], in[44:], x11) - xor(out[48:], in[48:], x12) - xor(out[52:], in[52:], x13) - xor(out[56:], in[56:], x14) - xor(out[60:], in[60:], x15) + src, dst = src[blockSize:], dst[blockSize:] } } diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go index 0110c9865a..c2d04851e0 100644 --- a/vendor/golang.org/x/crypto/chacha20/xor.go +++ b/vendor/golang.org/x/crypto/chacha20/xor.go @@ -13,10 +13,10 @@ const unaligned = runtime.GOARCH == "386" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x" -// xor reads a little endian uint32 from src, XORs it with u and +// addXor reads a little endian uint32 from src, XORs it with (a + b) and // places the result in little endian byte order in dst. -func xor(dst, src []byte, u uint32) { - _, _ = src[3], dst[3] // eliminate bounds checks +func addXor(dst, src []byte, a, b uint32) { + _, _ = src[3], dst[3] // bounds check elimination hint if unaligned { // The compiler should optimize this code into // 32-bit unaligned little endian loads and stores. @@ -27,15 +27,16 @@ func xor(dst, src []byte, u uint32) { v |= uint32(src[1]) << 8 v |= uint32(src[2]) << 16 v |= uint32(src[3]) << 24 - v ^= u + v ^= a + b dst[0] = byte(v) dst[1] = byte(v >> 8) dst[2] = byte(v >> 16) dst[3] = byte(v >> 24) } else { - dst[0] = src[0] ^ byte(u) - dst[1] = src[1] ^ byte(u>>8) - dst[2] = src[2] ^ byte(u>>16) - dst[3] = src[3] ^ byte(u>>24) + a += b + dst[0] = src[0] ^ byte(a) + dst[1] = src[1] ^ byte(a>>8) + dst[2] = src[2] ^ byte(a>>16) + dst[3] = src[3] ^ byte(a>>24) } } diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go index b0c2cd0561..d118f30ed5 100644 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -2,10 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !amd64,!ppc64le gccgo purego +// +build !amd64,!ppc64le,!s390x gccgo purego package poly1305 type mac struct{ macGeneric } - -func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go index 066159b797..9d7a6af09f 100644 --- a/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -26,7 +26,9 @@ const TagSize = 16 // 16-byte result into out. Authenticating two different messages with the same // key allows an attacker to forge messages at will. func Sum(out *[16]byte, m []byte, key *[32]byte) { - sum(out, m, key) + h := New(key) + h.Write(m) + h.Sum(out[:0]) } // Verify returns true if mac is a valid authenticator for m with the given key. @@ -46,10 +48,9 @@ func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { // two different messages with the same key allows an attacker // to forge messages at will. func New(key *[32]byte) *MAC { - return &MAC{ - mac: newMAC(key), - finalized: false, - } + m := &MAC{} + initialize(key, &m.macState) + return m } // MAC is an io.Writer computing an authentication tag @@ -58,7 +59,7 @@ func New(key *[32]byte) *MAC { // MAC cannot be used like common hash.Hash implementations, // because using a poly1305 key twice breaks its security. // Therefore writing data to a running MAC after calling -// Sum causes it to panic. +// Sum or Verify causes it to panic. type MAC struct { mac // platform-dependent implementation @@ -71,10 +72,10 @@ func (h *MAC) Size() int { return TagSize } // Write adds more data to the running message authentication code. // It never returns an error. // -// It must not be called after the first call of Sum. +// It must not be called after the first call of Sum or Verify. func (h *MAC) Write(p []byte) (n int, err error) { if h.finalized { - panic("poly1305: write to MAC after Sum") + panic("poly1305: write to MAC after Sum or Verify") } return h.mac.Write(p) } @@ -87,3 +88,12 @@ func (h *MAC) Sum(b []byte) []byte { h.finalized = true return append(b, mac[:]...) } + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go index 35b9e38c90..99e5a1d50e 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -9,17 +9,6 @@ package poly1305 //go:noescape func update(state *macState, msg []byte) -func sum(out *[16]byte, m []byte, key *[32]byte) { - h := newMAC(key) - h.Write(m) - h.Sum(out) -} - -func newMAC(key *[32]byte) (h mac) { - initialize(key, &h.r, &h.s) - return -} - // mac is a wrapper for macGeneric that redirects calls that would have gone to // updateGeneric to update. 
// diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go index 1187eab78f..c942a65904 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -31,16 +31,18 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { h.Sum(out) } -func newMACGeneric(key *[32]byte) (h macGeneric) { - initialize(key, &h.r, &h.s) - return +func newMACGeneric(key *[32]byte) macGeneric { + m := macGeneric{} + initialize(key, &m.macState) + return m } // macState holds numbers in saturated 64-bit little-endian limbs. That is, // the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. type macState struct { // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but - // can grow larger during and after rounds. + // can grow larger during and after rounds. It must, however, remain below + // 2 * (2¹³⁰ - 5). h [3]uint64 // r and s are the private key components. r [2]uint64 @@ -97,11 +99,12 @@ const ( rMask1 = 0x0FFFFFFC0FFFFFFC ) -func initialize(key *[32]byte, r, s *[2]uint64) { - r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 - r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 - s[0] = binary.LittleEndian.Uint64(key[16:24]) - s[1] = binary.LittleEndian.Uint64(key[24:32]) +// initialize loads the 256-bit key into the two 128-bit secret values r and s. +func initialize(key *[32]byte, m *macState) { + m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + m.s[0] = binary.LittleEndian.Uint64(key[16:24]) + m.s[1] = binary.LittleEndian.Uint64(key[24:32]) } // uint128 holds a 128-bit number as two 64-bit limbs, for use with the diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go deleted file mode 100644 index 2e3ae34c7d..0000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo purego - -package poly1305 - -func sum(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMAC(key) - h.Write(msg) - h.Sum(out) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go index 92597bb8c2..2e7a120b19 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -9,17 +9,6 @@ package poly1305 //go:noescape func update(state *macState, msg []byte) -func sum(out *[16]byte, m []byte, key *[32]byte) { - h := newMAC(key) - h.Write(m) - h.Sum(out) -} - -func newMAC(key *[32]byte) (h mac) { - initialize(key, &h.r, &h.s) - return -} - // mac is a wrapper for macGeneric that redirects calls that would have gone to // updateGeneric to update. // diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go index 5f91ff84a9..958fedc079 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build go1.11,!gccgo,!purego +// +build !gccgo,!purego package poly1305 @@ -10,30 +10,66 @@ import ( "golang.org/x/sys/cpu" ) -// poly1305vx is an assembly implementation of Poly1305 that uses vector +// updateVX is an assembly implementation of Poly1305 that uses vector // instructions. It must only be called if the vector facility (vx) is // available. //go:noescape -func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte) +func updateVX(state *macState, msg []byte) -// poly1305vmsl is an assembly implementation of Poly1305 that uses vector -// instructions, including VMSL. It must only be called if the vector facility (vx) is -// available and if VMSL is supported. -//go:noescape -func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte) +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState -func sum(out *[16]byte, m []byte, key *[32]byte) { - if cpu.S390X.HasVX { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - if cpu.S390X.HasVXE && len(m) > 256 { - poly1305vmsl(out, mPtr, uint64(len(m)), key) - } else { - poly1305vx(out, mPtr, uint64(len(m)), key) - } - } else { - sumGeneric(out, m, key) - } + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. + if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) } diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s index 806d1694b0..0fa9ee6e0b 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -2,115 +2,187 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.11,!gccgo,!purego +// +build !gccgo,!purego #include "textflag.h" -// Implementation of Poly1305 using the vector facility (vx). +// This implementation of Poly1305 uses the vector facility (vx) +// to process up to 2 blocks (32 bytes) per iteration using an +// algorithm based on the one described in: +// +// NEON crypto, Daniel J. 
Bernstein & Peter Schwabe +// https://cryptojedi.org/papers/neoncrypto-20120320.pdf +// +// This algorithm uses 5 26-bit limbs to represent a 130-bit +// value. These limbs are, for the most part, zero extended and +// placed into 64-bit vector register elements. Each vector +// register is 128-bits wide and so holds 2 of these elements. +// Using 26-bit limbs allows us plenty of headroom to accomodate +// accumulations before and after multiplication without +// overflowing either 32-bits (before multiplication) or 64-bits +// (after multiplication). +// +// In order to parallelise the operations required to calculate +// the sum we use two separate accumulators and then sum those +// in an extra final step. For compatibility with the generic +// implementation we perform this summation at the end of every +// updateVX call. +// +// To use two accumulators we must multiply the message blocks +// by r² rather than r. Only the final message block should be +// multiplied by r. +// +// Example: +// +// We want to calculate the sum (h) for a 64 byte message (m): +// +// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r +// +// To do this we split the calculation into the even indices +// and odd indices of the message. These form our SIMD 'lanes': +// +// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 +// m[16:32]r³ + m[48:64]r <- lane 1 +// +// To calculate this iteratively we refactor so that both lanes +// are written in terms of r² and r: +// +// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 +// (m[16:32]r² + m[48:64])r <- lane 1 +// ^ ^ +// | coefficients for second iteration +// coefficients for first iteration +// +// So in this case we would have two iterations. In the first +// both lanes are multiplied by r². In the second only the +// first lane is multiplied by r² and the second lane is +// instead multiplied by r. This gives use the odd and even +// powers of r that we need from the original equation. +// +// Notation: +// +// h - accumulator +// r - key +// m - message +// +// [a, b] - SIMD register holding two 64-bit values +// [a, b, c, d] - SIMD register holding four 32-bit values +// xᵢ[n] - limb n of variable x with bit width i +// +// Limbs are expressed in little endian order, so for 26-bit +// limbs x₂₆[4] will be the most significant limb and x₂₆[0] +// will be the least significant limb. 
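// Illustrative sketch, not part of this patch: the even/odd lane refactoring
// described in the comment above can be sanity-checked with math/big. The sample
// values for r and the message blocks are arbitrary stand-ins, and the helper
// names (mod, mul, add) are hypothetical rather than taken from the vendored code.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2¹³⁰ - 5, the Poly1305 prime.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	mod := func(x *big.Int) *big.Int { return new(big.Int).Mod(x, p) }
	mul := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Mul(a, b)) }
	add := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Add(a, b)) }

	r := big.NewInt(0x0ffffffc0ffffffc) // arbitrary sample key value
	m := []*big.Int{big.NewInt(101), big.NewInt(202), big.NewInt(303), big.NewInt(404)} // four sample blocks

	r2 := mul(r, r)
	r3 := mul(r2, r)
	r4 := mul(r2, r2)

	// Direct form: h = m[0]r⁴ + m[1]r³ + m[2]r² + m[3]r.
	direct := add(add(mul(m[0], r4), mul(m[1], r3)), add(mul(m[2], r2), mul(m[3], r)))

	// Two-lane form: lane 0 is multiplied by r² in both iterations,
	// lane 1 by r² in the first iteration and by r in the second.
	lane0 := mul(add(mul(m[0], r2), m[2]), r2)
	lane1 := mul(add(mul(m[1], r2), m[3]), r)

	fmt.Println("lanes match direct sum:", add(lane0, lane1).Cmp(direct) == 0) // true
}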
-// constants -#define MOD26 V0 -#define EX0 V1 -#define EX1 V2 -#define EX2 V3 +// masking constants +#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits +#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits -// temporaries -#define T_0 V4 -#define T_1 V5 -#define T_2 V6 -#define T_3 V7 -#define T_4 V8 +// expansion constants (see EXPAND macro) +#define EX0 V2 +#define EX1 V3 +#define EX2 V4 -// key (r) -#define R_0 V9 -#define R_1 V10 -#define R_2 V11 -#define R_3 V12 -#define R_4 V13 -#define R5_1 V14 -#define R5_2 V15 -#define R5_3 V16 -#define R5_4 V17 -#define RSAVE_0 R5 -#define RSAVE_1 R6 -#define RSAVE_2 R7 -#define RSAVE_3 R8 -#define RSAVE_4 R9 -#define R5SAVE_1 V28 -#define R5SAVE_2 V29 -#define R5SAVE_3 V30 -#define R5SAVE_4 V31 +// key (r², r or 1 depending on context) +#define R_0 V5 +#define R_1 V6 +#define R_2 V7 +#define R_3 V8 +#define R_4 V9 -// message block -#define F_0 V18 -#define F_1 V19 -#define F_2 V20 -#define F_3 V21 -#define F_4 V22 +// precalculated coefficients (5r², 5r or 0 depending on context) +#define R5_1 V10 +#define R5_2 V11 +#define R5_3 V12 +#define R5_4 V13 -// accumulator -#define H_0 V23 -#define H_1 V24 -#define H_2 V25 -#define H_3 V26 -#define H_4 V27 +// message block (m) +#define M_0 V14 +#define M_1 V15 +#define M_2 V16 +#define M_3 V17 +#define M_4 V18 -GLOBL ·keyMask<>(SB), RODATA, $16 -DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f -DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f +// accumulator (h) +#define H_0 V19 +#define H_1 V20 +#define H_2 V21 +#define H_3 V22 +#define H_4 V23 -GLOBL ·bswapMask<>(SB), RODATA, $16 -DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 -DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 +// temporary registers (for short-lived values) +#define T_0 V24 +#define T_1 V25 +#define T_2 V26 +#define T_3 V27 +#define T_4 V28 -GLOBL ·constants<>(SB), RODATA, $64 -// MOD26 -DATA ·constants<>+0(SB)/8, $0x3ffffff -DATA ·constants<>+8(SB)/8, $0x3ffffff +GLOBL ·constants<>(SB), RODATA, $0x30 // EX0 -DATA ·constants<>+16(SB)/8, $0x0006050403020100 -DATA ·constants<>+24(SB)/8, $0x1016151413121110 +DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 +DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 // EX1 -DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716 +DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 // EX2 -DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d +DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d -// h = (f*g) % (2**130-5) [partial reduction] +// MULTIPLY multiplies each lane of f and g, partially reduced +// modulo 2¹³⁰ - 5. The result, h, consists of partial products +// in each lane that need to be reduced further to produce the +// final result. +// +// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ +// +// Note that the multiplication by 5 of the high bits is +// achieved by precalculating the multiplication of four of the +// g coefficients by 5. These are g51-g54. 
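// Illustrative sketch, not part of this patch: the partial reduction used by the
// MULTIPLY comment above relies on 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5), so the bits of a
// product above bit 129 can be folded back in after multiplying them by 5. A
// minimal math/big check of that identity; the sample operands are arbitrary and
// the variable names are hypothetical, not from the vendored code.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 130), big.NewInt(5)) // 2¹³⁰ - 5

	// Arbitrary sample operands below 2¹³⁰ (stand-ins for f and g).
	f, _ := new(big.Int).SetString("f8b0a1c3d4e5f60718293a4b5c6d7e8f", 16)
	g, _ := new(big.Int).SetString("bad1deafacade0123456789abcdef012", 16)

	fg := new(big.Int).Mul(f, g)

	// Partial reduction: keep the low 130 bits, add 5 times everything above them.
	mask := new(big.Int).Sub(new(big.Int).Lsh(one, 130), one)
	lo := new(big.Int).And(fg, mask)
	hi := new(big.Int).Rsh(fg, 130)
	partial := lo.Add(lo, hi.Mul(hi, big.NewInt(5)))

	// The partially reduced value is congruent to f·g modulo 2¹³⁰ - 5.
	fmt.Println("congruent:", new(big.Int).Mod(partial, p).Cmp(new(big.Int).Mod(fg, p)) == 0) // true
}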
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ VMLOF f0, g0, h0 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g2, h2 \ VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ VMLOF f1, g54, T_0 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g1, T_2 \ VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ VMALOF f2, g53, h0, h0 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g0, h2, h2 \ VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g54, T_2, T_2 \ VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ VMALOF f4, g51, h0, h0 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g53, h2, h2 \ VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ VAG T_0, h0, h0 \ - VAG T_1, h1, h1 \ - VAG T_2, h2, h2 \ VAG T_3, h3, h3 \ - VAG T_4, h4, h4 + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 -// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4 +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. #define REDUCE(h0, h1, h2, h3, h4) \ VESRLG $26, h0, T_0 \ VESRLG $26, h3, T_1 \ @@ -136,144 +208,155 @@ DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d VN MOD26, h3, h3 \ VAG T_2, h4, h4 -// expand in0 into d[0] and in1 into d[1] +// EXPAND splits the 128-bit little-endian values in0 and in1 +// into 26-bit big-endian limbs and places the results into +// the first and second lane of d₂₆[0:4] respectively. +// +// The EX0, EX1 and EX2 constants are arrays of byte indices +// for permutation. The permutation both reverses the bytes +// in the input and ensures the bytes are copied into the +// destination limb ready to be shifted into their final +// position. 
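// Illustrative sketch, not part of this patch: ignoring the SIMD lanes and byte
// permutations, the limb split that the EXPAND description above performs is a
// plain shift-and-mask of a 128-bit little-endian block. limbs26 is a hypothetical
// scalar helper, not a function from the vendored code; the top limb holds only
// 24 bits here, and the 2¹²⁸ bit of each message block is added separately
// (the VLEIB instructions in the diff below).
package main

import (
	"encoding/binary"
	"fmt"
)

// limbs26 splits a 16-byte little-endian block into five 26-bit limbs,
// least significant limb first.
func limbs26(block [16]byte) [5]uint32 {
	lo := binary.LittleEndian.Uint64(block[0:8])
	hi := binary.LittleEndian.Uint64(block[8:16])

	const mask26 = (1 << 26) - 1
	return [5]uint32{
		uint32(lo) & mask26,                // bits 0..25
		uint32(lo>>26) & mask26,            // bits 26..51
		uint32((lo>>52)|(hi<<12)) & mask26, // bits 52..77
		uint32(hi>>14) & mask26,            // bits 78..103
		uint32(hi >> 40),                   // bits 104..127
	}
}

func main() {
	var block [16]byte
	for i := range block {
		block[i] = byte(i + 1) // arbitrary sample bytes
	}
	fmt.Println(limbs26(block))
}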
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VGBM $0x0707, d1 \ // d1=tmp - VPERM in0, in1, EX2, d4 \ VPERM in0, in1, EX0, d0 \ VPERM in0, in1, EX1, d2 \ - VN d1, d4, d4 \ + VPERM in0, in1, EX2, d4 \ VESRLG $26, d0, d1 \ VESRLG $30, d2, d3 \ VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ - VN MOD26, d1, d1 \ - VN MOD26, d2, d2 \ - VN MOD26, d3, d3 + VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] + VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] + VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] + VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] + VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] -// pack h4:h0 into h1:h0 (no carry) -#define PACK(h0, h1, h2, h3, h4) \ - VESLG $26, h1, h1 \ - VESLG $26, h3, h3 \ - VO h0, h1, h0 \ - VO h2, h3, h2 \ - VESLG $4, h2, h2 \ - VLEIB $7, $48, h1 \ - VSLB h1, h2, h2 \ - VO h0, h2, h0 \ - VLEIB $7, $104, h1 \ - VSLB h1, h4, h3 \ - VO h3, h0, h0 \ - VLEIB $7, $24, h1 \ - VSRLB h1, h4, h1 +// func updateVX(state *macState, msg []byte) +TEXT ·updateVX(SB), NOSPLIT, $0 + MOVD state+0(FP), R1 + LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len -// if h > 2**130-5 then h -= 2**130-5 -#define MOD(h0, h1, t0, t1, t2) \ - VZERO t0 \ - VLEIG $1, $5, t0 \ - VACCQ h0, t0, t1 \ - VAQ h0, t0, t0 \ - VONE t2 \ - VLEIG $1, $-4, t2 \ - VAQ t2, t1, t1 \ - VACCQ h1, t1, t1 \ - VONE t2 \ - VAQ t2, t1, t1 \ - VN h0, t1, t2 \ - VNC t0, t1, t1 \ - VO t1, t2, h0 - -// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305vx(SB), $0-32 - // This code processes up to 2 blocks (32 bytes) per iteration - // using the algorithm described in: - // NEON crypto, Daniel J. Bernstein & Peter Schwabe - // https://cryptojedi.org/papers/neoncrypto-20120320.pdf - LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key - - // load MOD26, EX0, EX1 and EX2 + // load EX0, EX1 and EX2 MOVD $·constants<>(SB), R5 - VLM (R5), MOD26, EX2 + VLM (R5), EX0, EX2 - // setup r - VL (R4), T_0 - MOVD $·keyMask<>(SB), R6 - VL (R6), T_1 - VN T_0, T_1, T_0 - EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4) + // generate masks + VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] + VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] - // setup r*5 - VLEIG $0, $5, T_0 - VLEIG $1, $5, T_0 + // load h (accumulator) and r (key) from state + VZERO T_1 // [0, 0] + VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] + VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] + VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] + VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] + VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] - // store r (for final block) - VMLOF T_0, R_1, R5SAVE_1 - VMLOF T_0, R_2, R5SAVE_2 - VMLOF T_0, R_3, R5SAVE_3 - VMLOF T_0, R_4, R5SAVE_4 - VLGVG $0, R_0, RSAVE_0 - VLGVG $0, R_1, RSAVE_1 - VLGVG $0, R_2, RSAVE_2 - VLGVG $0, R_3, RSAVE_3 - VLGVG $0, R_4, RSAVE_4 + // unpack h and r into 26-bit limbs + // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value + VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] + VZERO H_1 // [0, 0] + VZERO H_3 // [0, 0] + VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out + VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] + VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] + VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only + VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] + VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only + VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete + VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete - // skip r**2 calculation + // replicate r across all 4 vector elements + VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] 
+ VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] + VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] + VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] + VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] + + // zero out lane 1 of h + VLEIG $1, $0, H_0 // [h₂₆[0], 0] + VLEIG $1, $0, H_1 // [h₂₆[1], 0] + VLEIG $1, $0, H_2 // [h₂₆[2], 0] + VLEIG $1, $0, H_3 // [h₂₆[3], 0] + VLEIG $1, $0, H_4 // [h₂₆[4], 0] + + // calculate 5r (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block CMPBLE R3, $16, skip - // calculate r**2 - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4) - REDUCE(H_0, H_1, H_2, H_3, H_4) - VLEIG $0, $5, T_0 - VLEIG $1, $5, T_0 - VMLOF T_0, H_1, R5_1 - VMLOF T_0, H_2, R5_2 - VMLOF T_0, H_3, R5_3 - VMLOF T_0, H_4, R5_4 - VLR H_0, R_0 - VLR H_1, R_1 - VLR H_2, R_2 - VLR H_3, R_3 - VLR H_4, R_4 + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] - // initialize h - VZERO H_0 - VZERO H_1 - VZERO H_2 - VZERO H_3 - VZERO H_4 + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] loop: - CMPBLE R3, $32, b2 - VLM (R2), T_0, T_1 - SUB $32, R3 - MOVD $32(R2), R2 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) - VLEIB $4, $1, F_4 - VLEIB $12, $1, F_4 + CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 multiply: - VAG H_0, F_0, F_0 - VAG H_1, F_1, F_1 - VAG H_2, F_2, F_2 - VAG H_3, F_3, F_3 - VAG H_4, F_4, F_4 - MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products REDUCE(H_0, H_1, H_2, H_3, H_4) + CMPBNE R3, $0, loop finish: - // sum vectors + // sum lane 0 and lane 1 and put the result in lane 1 VZERO T_0 VSUMQG H_0, T_0, H_0 - VSUMQG H_1, T_0, H_1 - VSUMQG H_2, T_0, H_2 VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 VSUMQG H_4, T_0, H_4 + VSUMQG 
H_2, T_0, H_2 - // h may be >= 2*(2**130-5) so we need to reduce it again + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. REDUCE(H_0, H_1, H_2, H_3, H_4) - // carry h1->h4 + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. VESRLG $26, H_1, T_1 VN MOD26, H_1, H_1 VAQ T_1, H_2, H_2 @@ -284,95 +367,137 @@ finish: VN MOD26, H_3, H_3 VAQ T_3, H_4, H_4 - // h is now < 2*(2**130-5) - // pack h into h1 (hi) and h0 (lo) - PACK(H_0, H_1, H_2, H_3, H_4) - - // if h > 2**130-5 then h -= 2**130-5 - MOD(H_0, H_1, T_0, T_1, T_2) - - // h += s - MOVD $·bswapMask<>(SB), R5 - VL (R5), T_1 - VL 16(R4), T_0 - VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) - VAQ T_0, H_0, H_0 - VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little) - VST H_0, (R1) + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) RET -b2: +b2: // 2 or fewer blocks remaining CMPBLE R3, $16, b1 - // 2 blocks remaining - SUB $17, R3 - VL (R2), T_0 - VLL R3, 16(R2), T_1 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_1 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) - CMPBNE R3, $16, 2(PC) - VLEIB $12, $1, F_4 - VLEIB $4, $1, F_4 + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes - // setup [r²,r] - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, RSAVE_3, R_3 - VLVGG $1, RSAVE_4, R_4 - VPDI $0, R5_1, R5SAVE_1, R5_1 - VPDI $0, R5_2, R5SAVE_2, R5_2 - VPDI $0, R5_3, R5SAVE_3, R5_3 - VPDI $0, R5_4, R5SAVE_4, R5_4 + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) + CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long + VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 + + // Split both blocks into 26-bit limbs in the appropriate lanes. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the second to last block. + VLEIB $4, $1, M_4 + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, M_4 + + // Finally, set up the coefficients for the final multiplication. + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r² so that can be kept the + // same. 
We want lane 1 to be multiplied by r so we need to move + // the saved r value into the 32-bit odd index in lane 1 by + // rotating the 64-bit lane by 32. + VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only + VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] + VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] + VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] + VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] + VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] + VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] + VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] + VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] + VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] MOVD $0, R3 BR multiply skip: - VZERO H_0 - VZERO H_1 - VZERO H_2 - VZERO H_3 - VZERO H_4 - CMPBEQ R3, $0, finish -b1: - // 1 block remaining - SUB $1, R3 - VLL R3, (R2), T_0 - ADD $1, R3 +b1: // 1 block remaining + + // Load the final block (1-16 bytes). This will be placed into + // lane 0. + MOVD $-1(R3), R0 + VLL R0, (R2), T_0 // pad to 16 bytes with zeros + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. MOVBZ $1, R0 CMPBEQ R3, $16, 2(PC) VLVGB R3, R0, T_0 - VZERO T_1 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) - CMPBNE R3, $16, 2(PC) - VLEIB $4, $1, F_4 - VLEIG $1, $1, R_0 - VZERO R_1 - VZERO R_2 - VZERO R_3 - VZERO R_4 - VZERO R5_1 - VZERO R5_2 - VZERO R5_3 - VZERO R5_4 - // setup [r, 1] - VLVGG $0, RSAVE_0, R_0 - VLVGG $0, RSAVE_1, R_1 - VLVGG $0, RSAVE_2, R_2 - VLVGG $0, RSAVE_3, R_3 - VLVGG $0, RSAVE_4, R_4 - VPDI $0, R5SAVE_1, R5_1, R5_1 - VPDI $0, R5SAVE_2, R5_2, R5_2 - VPDI $0, R5SAVE_3, R5_3, R5_3 - VPDI $0, R5SAVE_4, R5_4, R5_4 + // Set the message block in lane 1 to the value 0 so that it + // can be accumulated without affecting the final result. + VZERO T_1 + + // Split the final message block into 26-bit limbs in lane 0. + // Lane 1 will be contain 0. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, M_4 + + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r so we need to move the + // saved r value into the 32-bit odd index in lane 0. We want + // lane 1 to be set to the value 1. This makes multiplication + // a no-op. We do this by setting lane 1 in every register to 0 + // and then just setting the 32-bit index 3 in R_0 to 1. + VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. 
+ VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] MOVD $0, R3 BR multiply diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s deleted file mode 100644 index b439af9369..0000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s +++ /dev/null @@ -1,909 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11,!gccgo,!purego - -#include "textflag.h" - -// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction. - -// constants -#define EX0 V1 -#define EX1 V2 -#define EX2 V3 - -// temporaries -#define T_0 V4 -#define T_1 V5 -#define T_2 V6 -#define T_3 V7 -#define T_4 V8 -#define T_5 V9 -#define T_6 V10 -#define T_7 V11 -#define T_8 V12 -#define T_9 V13 -#define T_10 V14 - -// r**2 & r**4 -#define R_0 V15 -#define R_1 V16 -#define R_2 V17 -#define R5_1 V18 -#define R5_2 V19 -// key (r) -#define RSAVE_0 R7 -#define RSAVE_1 R8 -#define RSAVE_2 R9 -#define R5SAVE_1 R10 -#define R5SAVE_2 R11 - -// message block -#define M0 V20 -#define M1 V21 -#define M2 V22 -#define M3 V23 -#define M4 V24 -#define M5 V25 - -// accumulator -#define H0_0 V26 -#define H1_0 V27 -#define H2_0 V28 -#define H0_1 V29 -#define H1_1 V30 -#define H2_1 V31 - -GLOBL ·keyMask<>(SB), RODATA, $16 -DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f -DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f - -GLOBL ·bswapMask<>(SB), RODATA, $16 -DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 -DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 - -GLOBL ·constants<>(SB), RODATA, $48 -// EX0 -DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+8(SB)/8, $0x0000050403020100 -// EX1 -DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+24(SB)/8, $0x00000a0908070605 -// EX2 -DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b - -GLOBL ·c<>(SB), RODATA, $48 -// EX0 -DATA ·c<>+0(SB)/8, $0x0000050403020100 -DATA ·c<>+8(SB)/8, $0x0000151413121110 -// EX1 -DATA ·c<>+16(SB)/8, $0x00000a0908070605 -DATA ·c<>+24(SB)/8, $0x00001a1918171615 -// EX2 -DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b -DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b - -GLOBL ·reduce<>(SB), RODATA, $32 -// 44 bit -DATA ·reduce<>+0(SB)/8, $0x0 -DATA ·reduce<>+8(SB)/8, $0xfffffffffff -// 42 bit -DATA ·reduce<>+16(SB)/8, $0x0 -DATA ·reduce<>+24(SB)/8, $0x3ffffffffff - -// h = (f*g) % (2**130-5) [partial reduction] -// uses T_0...T_9 temporary registers -// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 -// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2 -#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \ - \ // Eliminate the dependency for the last 2 VMSLs - VMSLG m02_0, r_2, m4_2, m4_2 \ - VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined - VMSLG m02_0, r_0, m4_0, m4_0 \ - VMSLG m02_1, r5_2, V0, T_0 \ - VMSLG m02_0, r_1, m4_1, m4_1 \ - VMSLG m02_1, r_0, V0, T_1 \ - VMSLG m02_1, r_1, V0, T_2 \ - VMSLG m02_2, r5_1, V0, T_3 \ - VMSLG m02_2, r5_2, V0, T_4 \ - VMSLG m13_0, r_0, m5_0, m5_0 \ - VMSLG m13_1, r5_2, V0, T_5 \ - VMSLG m13_0, r_1, m5_1, m5_1 \ - VMSLG m13_1, r_0, V0, T_6 \ - VMSLG m13_1, r_1, V0, T_7 \ - VMSLG m13_2, r5_1, V0, T_8 \ - VMSLG m13_2, r5_2, V0, T_9 \ - VMSLG m02_2, 
r_0, m4_2, m4_2 \ - VMSLG m13_2, r_0, m5_2, m5_2 \ - VAQ m4_0, T_0, m02_0 \ - VAQ m4_1, T_1, m02_1 \ - VAQ m5_0, T_5, m13_0 \ - VAQ m5_1, T_6, m13_1 \ - VAQ m02_0, T_3, m02_0 \ - VAQ m02_1, T_4, m02_1 \ - VAQ m13_0, T_8, m13_0 \ - VAQ m13_1, T_9, m13_1 \ - VAQ m4_2, T_2, m02_2 \ - VAQ m5_2, T_7, m13_2 \ - -// SQUARE uses three limbs of r and r_2*5 to output square of r -// uses T_1, T_5 and T_7 temporary registers -// input: r_0, r_1, r_2, r5_2 -// temp: TEMP0, TEMP1, TEMP2 -// output: p0, p1, p2 -#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \ - VMSLG r_0, r_0, p0, p0 \ - VMSLG r_1, r5_2, V0, TEMP0 \ - VMSLG r_2, r5_2, p1, p1 \ - VMSLG r_0, r_1, V0, TEMP1 \ - VMSLG r_1, r_1, p2, p2 \ - VMSLG r_0, r_2, V0, TEMP2 \ - VAQ TEMP0, p0, p0 \ - VAQ TEMP1, p1, p1 \ - VAQ TEMP2, p2, p2 \ - VAQ TEMP0, p0, p0 \ - VAQ TEMP1, p1, p1 \ - VAQ TEMP2, p2, p2 \ - -// carry h0->h1->h2->h0 || h3->h4->h5->h3 -// uses T_2, T_4, T_5, T_7, T_8, T_9 -// t6, t7, t8, t9, t10, t11 -// input: h0, h1, h2, h3, h4, h5 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11 -// output: h0, h1, h2, h3, h4, h5 -#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \ - VLM (R12), t6, t7 \ // 44 and 42 bit clear mask - VLEIB $7, $0x28, t10 \ // 5 byte shift mask - VREPIB $4, t8 \ // 4 bit shift mask - VREPIB $2, t11 \ // 2 bit shift mask - VSRLB t10, h0, t0 \ // h0 byte shift - VSRLB t10, h1, t1 \ // h1 byte shift - VSRLB t10, h2, t2 \ // h2 byte shift - VSRLB t10, h3, t3 \ // h3 byte shift - VSRLB t10, h4, t4 \ // h4 byte shift - VSRLB t10, h5, t5 \ // h5 byte shift - VSRL t8, t0, t0 \ // h0 bit shift - VSRL t8, t1, t1 \ // h2 bit shift - VSRL t11, t2, t2 \ // h2 bit shift - VSRL t8, t3, t3 \ // h3 bit shift - VSRL t8, t4, t4 \ // h4 bit shift - VESLG $2, t2, t9 \ // h2 carry x5 - VSRL t11, t5, t5 \ // h5 bit shift - VN t6, h0, h0 \ // h0 clear carry - VAQ t2, t9, t2 \ // h2 carry x5 - VESLG $2, t5, t9 \ // h5 carry x5 - VN t6, h1, h1 \ // h1 clear carry - VN t7, h2, h2 \ // h2 clear carry - VAQ t5, t9, t5 \ // h5 carry x5 - VN t6, h3, h3 \ // h3 clear carry - VN t6, h4, h4 \ // h4 clear carry - VN t7, h5, h5 \ // h5 clear carry - VAQ t0, h1, h1 \ // h0->h1 - VAQ t3, h4, h4 \ // h3->h4 - VAQ t1, h2, h2 \ // h1->h2 - VAQ t4, h5, h5 \ // h4->h5 - VAQ t2, h0, h0 \ // h2->h0 - VAQ t5, h3, h3 \ // h5->h3 - VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves - VREPG $1, t7, t7 \ - VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5] - VSLDB $8, h1, h1, h1 \ - VSLDB $8, h2, h2, h2 \ - VO h0, h3, h3 \ - VO h1, h4, h4 \ - VO h2, h5, h5 \ - VESRLG $44, h3, t0 \ // 44 bit shift right - VESRLG $44, h4, t1 \ - VESRLG $42, h5, t2 \ - VN t6, h3, h3 \ // clear carry bits - VN t6, h4, h4 \ - VN t7, h5, h5 \ - VESLG $2, t2, t9 \ // multiply carry by 5 - VAQ t9, t2, t2 \ - VAQ t0, h4, h4 \ - VAQ t1, h5, h5 \ - VAQ t2, h3, h3 \ - -// carry h0->h1->h2->h0 -// input: h0, h1, h2 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8 -// output: h0, h1, h2 -#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \ - VLEIB $7, $0x28, t3 \ // 5 byte shift mask - VREPIB $4, t4 \ // 4 bit shift mask - VREPIB $2, t7 \ // 2 bit shift mask - VGBM $0x003F, t5 \ // mask to clear carry bits - VSRLB t3, h0, t0 \ - VSRLB t3, h1, t1 \ - VSRLB t3, h2, t2 \ - VESRLG $4, t5, t5 \ // 44 bit clear mask - VSRL t4, t0, t0 \ - VSRL t4, t1, t1 \ - VSRL t7, t2, t2 \ - VESRLG $2, t5, t6 \ // 42 bit clear mask - VESLG $2, t2, t8 \ - VAQ t8, t2, t2 \ - VN t5, h0, h0 \ - VN t5, h1, h1 \ - VN t6, h2, h2 \ - VAQ 
t0, h1, h1 \ - VAQ t1, h2, h2 \ - VAQ t2, h0, h0 \ - VSRLB t3, h0, t0 \ - VSRLB t3, h1, t1 \ - VSRLB t3, h2, t2 \ - VSRL t4, t0, t0 \ - VSRL t4, t1, t1 \ - VSRL t7, t2, t2 \ - VN t5, h0, h0 \ - VN t5, h1, h1 \ - VESLG $2, t2, t8 \ - VN t6, h2, h2 \ - VAQ t0, h1, h1 \ - VAQ t8, t2, t2 \ - VAQ t1, h2, h2 \ - VAQ t2, h0, h0 \ - -// expands two message blocks into the lower halfs of the d registers -// moves the contents of the d registers into upper halfs -// input: in1, in2, d0, d1, d2, d3, d4, d5 -// temp: TEMP0, TEMP1, TEMP2, TEMP3 -// output: d0, d1, d2, d3, d4, d5 -#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \ - VGBM $0xff3f, TEMP0 \ - VGBM $0xff1f, TEMP1 \ - VESLG $4, d1, TEMP2 \ - VESLG $4, d4, TEMP3 \ - VESRLG $4, TEMP0, TEMP0 \ - VPERM in1, d0, EX0, d0 \ - VPERM in2, d3, EX0, d3 \ - VPERM in1, d2, EX2, d2 \ - VPERM in2, d5, EX2, d5 \ - VPERM in1, TEMP2, EX1, d1 \ - VPERM in2, TEMP3, EX1, d4 \ - VN TEMP0, d0, d0 \ - VN TEMP0, d3, d3 \ - VESRLG $4, d1, d1 \ - VESRLG $4, d4, d4 \ - VN TEMP1, d2, d2 \ - VN TEMP1, d5, d5 \ - VN TEMP0, d1, d1 \ - VN TEMP0, d4, d4 \ - -// expands one message block into the lower halfs of the d registers -// moves the contents of the d registers into upper halfs -// input: in, d0, d1, d2 -// temp: TEMP0, TEMP1, TEMP2 -// output: d0, d1, d2 -#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \ - VGBM $0xff3f, TEMP0 \ - VESLG $4, d1, TEMP2 \ - VGBM $0xff1f, TEMP1 \ - VPERM in, d0, EX0, d0 \ - VESRLG $4, TEMP0, TEMP0 \ - VPERM in, d2, EX2, d2 \ - VPERM in, TEMP2, EX1, d1 \ - VN TEMP0, d0, d0 \ - VN TEMP1, d2, d2 \ - VESRLG $4, d1, d1 \ - VN TEMP0, d1, d1 \ - -// pack h2:h0 into h1:h0 (no carry) -// input: h0, h1, h2 -// output: h0, h1, h2 -#define PACK(h0, h1, h2) \ - VMRLG h1, h2, h2 \ // copy h1 to upper half h2 - VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20 - VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1 - VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1 - VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1 - VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1) - VLEIG $0, $0, h2 \ // clear upper half of h2 - VESRLG $40, h2, h1 \ // h1 now has upper two bits of result - VLEIB $7, $88, h1 \ // for byte shift (11 bytes) - VSLB h1, h2, h2 \ // shift h2 11 bytes to the left - VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1 - VLEIG $0, $0, h1 \ // clear upper half of h1 - -// if h > 2**130-5 then h -= 2**130-5 -// input: h0, h1 -// temp: t0, t1, t2 -// output: h0 -#define MOD(h0, h1, t0, t1, t2) \ - VZERO t0 \ - VLEIG $1, $5, t0 \ - VACCQ h0, t0, t1 \ - VAQ h0, t0, t0 \ - VONE t2 \ - VLEIG $1, $-4, t2 \ - VAQ t2, t1, t1 \ - VACCQ h1, t1, t1 \ - VONE t2 \ - VAQ t2, t1, t1 \ - VN h0, t1, t2 \ - VNC t0, t1, t1 \ - VO t1, t2, h0 \ - -// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305vmsl(SB), $0-32 - // This code processes 6 + up to 4 blocks (32 bytes) per iteration - // using the algorithm described in: - // NEON crypto, Daniel J. 
Bernstein & Peter Schwabe - // https://cryptojedi.org/papers/neoncrypto-20120320.pdf - // And as moddified for VMSL as described in - // Accelerating Poly1305 Cryptographic Message Authentication on the z14 - // O'Farrell et al, CASCON 2017, p48-55 - // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht - - LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key - VZERO V0 // c - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 // c - - // setup r - VL (R4), T_0 - MOVD $·keyMask<>(SB), R6 - VL (R6), T_1 - VN T_0, T_1, T_0 - VZERO T_2 // limbs for r - VZERO T_3 - VZERO T_4 - EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7) - - // T_2, T_3, T_4: [0, r] - - // setup r*20 - VLEIG $0, $0, T_0 - VLEIG $1, $20, T_0 // T_0: [0, 20] - VZERO T_5 - VZERO T_6 - VMSLG T_0, T_3, T_5, T_5 - VMSLG T_0, T_4, T_6, T_6 - - // store r for final block in GR - VLGVG $1, T_2, RSAVE_0 // c - VLGVG $1, T_3, RSAVE_1 // c - VLGVG $1, T_4, RSAVE_2 // c - VLGVG $1, T_5, R5SAVE_1 // c - VLGVG $1, T_6, R5SAVE_2 // c - - // initialize h - VZERO H0_0 - VZERO H1_0 - VZERO H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - // initialize pointer for reduce constants - MOVD $·reduce<>(SB), R12 - - // calculate r**2 and 20*(r**2) - VZERO R_0 - VZERO R_1 - VZERO R_2 - SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7) - REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1) - VZERO R5_1 - VZERO R5_2 - VMSLG T_0, R_1, R5_1, R5_1 - VMSLG T_0, R_2, R5_2, R5_2 - - // skip r**4 calculation if 3 blocks or less - CMPBLE R3, $48, b4 - - // calculate r**4 and 20*(r**4) - VZERO T_8 - VZERO T_9 - VZERO T_10 - SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7) - REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1) - VZERO T_2 - VZERO T_3 - VMSLG T_0, T_9, T_2, T_2 - VMSLG T_0, T_10, T_3, T_3 - - // put r**2 to the right and r**4 to the left of R_0, R_1, R_2 - VSLDB $8, T_8, T_8, T_8 - VSLDB $8, T_9, T_9, T_9 - VSLDB $8, T_10, T_10, T_10 - VSLDB $8, T_2, T_2, T_2 - VSLDB $8, T_3, T_3, T_3 - - VO T_8, R_0, R_0 - VO T_9, R_1, R_1 - VO T_10, R_2, R_2 - VO T_2, R5_1, R5_1 - VO T_3, R5_2, R5_2 - - CMPBLE R3, $80, load // less than or equal to 5 blocks in message - - // 6(or 5+1) blocks - SUB $81, R3 - VLM (R2), M0, M4 - VLL R3, 80(R2), M5 - ADD $1, R3 - MOVBZ $1, R0 - CMPBGE R3, $16, 2(PC) - VLVGB R3, R0, M5 - MOVD $96(R2), R2 - EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - VLEIB $2, $1, H2_0 - VLEIB $2, $1, H2_1 - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO T_4 - VZERO T_10 - EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3) - VLR T_4, M4 - VLEIB $10, $1, M2 - CMPBLT R3, $16, 2(PC) - VLEIB $10, $1, T_10 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - SUB $16, R3 - CMPBLE R3, $0, square - -load: - // load EX0, EX1 and EX2 - MOVD $·c<>(SB), R5 - VLM (R5), EX0, EX2 - -loop: - CMPBLE R3, $64, add // b4 // last 4 or less blocks left - - // next 4 full blocks - VLM (R2), M2, M5 - SUB $64, R3 - MOVD $64(R2), R2 - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, 
T_2, T_7, T_8, T_9) - - // expacc in-lined to create [m2, m3] limbs - VGBM $0x3f3f, T_0 // 44 bit clear mask - VGBM $0x1f1f, T_1 // 40 bit clear mask - VPERM M2, M3, EX0, T_3 - VESRLG $4, T_0, T_0 // 44 bit clear mask ready - VPERM M2, M3, EX1, T_4 - VPERM M2, M3, EX2, T_5 - VN T_0, T_3, T_3 - VESRLG $4, T_4, T_4 - VN T_1, T_5, T_5 - VN T_0, T_4, T_4 - VMRHG H0_1, T_3, H0_0 - VMRHG H1_1, T_4, H1_0 - VMRHG H2_1, T_5, H2_0 - VMRLG H0_1, T_3, H0_1 - VMRLG H1_1, T_4, H1_1 - VMRLG H2_1, T_5, H2_1 - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - VPERM M4, M5, EX0, T_3 - VPERM M4, M5, EX1, T_4 - VPERM M4, M5, EX2, T_5 - VN T_0, T_3, T_3 - VESRLG $4, T_4, T_4 - VN T_1, T_5, T_5 - VN T_0, T_4, T_4 - VMRHG V0, T_3, M0 - VMRHG V0, T_4, M1 - VMRHG V0, T_5, M2 - VMRLG V0, T_3, M3 - VMRLG V0, T_4, M4 - VMRLG V0, T_5, M5 - VLEIB $10, $1, M2 - VLEIB $10, $1, M5 - - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - CMPBNE R3, $0, loop - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - // load EX0, EX1, EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // sum vectors - VAQ H0_0, H0_1, H0_0 - VAQ H1_0, H1_1, H1_0 - VAQ H2_0, H2_1, H2_0 - - // h may be >= 2*(2**130-5) so we need to reduce it again - // M0...M4 are used as temps here - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - -next: // carry h1->h2 - VLEIB $7, $0x28, T_1 - VREPIB $4, T_2 - VGBM $0x003F, T_3 - VESRLG $4, T_3 - - // byte shift - VSRLB T_1, H1_0, T_4 - - // bit shift - VSRL T_2, T_4, T_4 - - // clear h1 carry bits - VN T_3, H1_0, H1_0 - - // add carry - VAQ T_4, H2_0, H2_0 - - // h is now < 2*(2**130-5) - // pack h into h1 (hi) and h0 (lo) - PACK(H0_0, H1_0, H2_0) - - // if h > 2**130-5 then h -= 2**130-5 - MOD(H0_0, H1_0, T_0, T_1, T_2) - - // h += s - MOVD $·bswapMask<>(SB), R5 - VL (R5), T_1 - VL 16(R4), T_0 - VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) - VAQ T_0, H0_0, H0_0 - VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little) - VST H0_0, (R1) - RET - -add: - // load EX0, EX1, EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - CMPBLE R3, $64, b4 - -b4: - CMPBLE R3, $48, b3 // 3 blocks or less - - // 4(3+1) blocks remaining - SUB $49, R3 - VLM (R2), M0, M2 - VLL R3, 48(R2), M3 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M3 - MOVD $64(R2), R2 - EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - VZERO M0 - VZERO M1 - VZERO M4 - VZERO M5 - VZERO T_4 - VZERO T_10 - EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3) - VLR T_4, M2 - VLEIB $10, $1, M4 - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_10 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - 
SUB $16, R3 - CMPBLE R3, $0, square // this condition must always hold true! - -b3: - CMPBLE R3, $32, b2 - - // 3 blocks remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5) - - SUB $33, R3 - VLM (R2), M0, M1 - VLL R3, 32(R2), M2 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M2 - - // H += m0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) - VLEIB $10, $1, T_3 - VAG H0_0, T_1, H0_0 - VAG H1_0, T_2, H1_0 - VAG H2_0, T_3, H2_0 - - VZERO M0 - VZERO M3 - VZERO M4 - VZERO M5 - VZERO T_10 - - // (H+m0)*r - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9) - - // H += m1 - VZERO V0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6) - VLEIB $10, $1, T_3 - VAQ H0_0, T_1, H0_0 - VAQ H1_0, T_2, H1_0 - VAQ H2_0, T_3, H2_0 - REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) - - // [H, m2] * [r**2, r] - EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3) - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, H2_0 - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10) - SUB $16, R3 - CMPBLE R3, $0, next // this condition must always hold true! 
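For orientation, the VMSL routines being deleted here and the vector (vx) implementation kept above both evaluate the same Poly1305 recurrence: each 16-byte block (with a 1 bit appended) is added into the accumulator h, h is multiplied by the clamped key r modulo 2^130 - 5, and the tag is the low 128 bits of h + s. The following deliberately slow math/big sketch in Go is for reference only; it is not part of the vendored code and names like poly1305Ref are illustrative.

package main

import (
	"fmt"
	"math/big"
)

// leBytes interprets b as a little-endian unsigned integer.
func leBytes(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i := range b {
		be[i] = b[len(b)-1-i]
	}
	return new(big.Int).SetBytes(be)
}

// poly1305Ref computes a Poly1305 tag the slow way; it mirrors the
// block-by-block recurrence that the assembly evaluates in parallel lanes.
func poly1305Ref(msg []byte, key [32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2^130 - 5

	// Clamp r, which is what the key mask constant in the assembly does.
	var rb [16]byte
	copy(rb[:], key[:16])
	rb[3], rb[7], rb[11], rb[15] = rb[3]&15, rb[7]&15, rb[11]&15, rb[15]&15
	rb[4], rb[8], rb[12] = rb[4]&252, rb[8]&252, rb[12]&252
	r, s := leBytes(rb[:]), leBytes(key[16:])

	h := new(big.Int)
	for len(msg) > 0 {
		n := len(msg)
		if n > 16 {
			n = 16
		}
		// Append the 1 bit after the (possibly short) block, then accumulate.
		block := new(big.Int).SetBit(leBytes(msg[:n]), 8*n, 1)
		h.Add(h, block)
		h.Mul(h, r)
		h.Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, s)

	// The tag is the low 128 bits of h+s, encoded little-endian.
	hb := h.Bytes() // big-endian
	var tag [16]byte
	for i := 0; i < 16 && i < len(hb); i++ {
		tag[i] = hb[len(hb)-1-i]
	}
	return tag
}

func main() {
	var key [32]byte
	copy(key[:], "this is 32-byte key for Poly1305")
	fmt.Printf("%x\n", poly1305Ref([]byte("Hello world!"), key))
}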
- -b2: - CMPBLE R3, $16, b1 - - // 2 blocks remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - // move h to the left and 0s at the right - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - - // get message blocks and append 1 to start - SUB $17, R3 - VL (R2), M0 - VLL R3, 16(R2), M1 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M1 - VZERO T_6 - VZERO T_7 - VZERO T_8 - EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3) - EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3) - VLEIB $2, $1, T_8 - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_8 - - // add [m0, m1] to h - VAG H0_0, T_6, H0_0 - VAG H1_0, T_7, H1_0 - VAG H2_0, T_8, H2_0 - - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - VZERO T_10 - VZERO M0 - - // at this point R_0 .. R5_2 look like [r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) - SUB $16, R3, R3 - CMPBLE R3, $0, next - -b1: - CMPBLE R3, $0, next - - // 1 block remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - - // set up [0, m0] limbs - SUB $1, R3 - VLL R3, (R2), M0 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m] - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_3 - - // h+m0 - VAQ H0_0, T_1, H0_0 - VAQ H1_0, T_2, H1_0 - VAQ H2_0, T_3, H2_0 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - - BR next - -square: - // setup [r²,r] - VSLDB $8, R_0, R_0, 
R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // (h0*r**2) + (h1*r) - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - BR next diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index 51f740500e..b909471cc0 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -102,8 +102,9 @@ type ConstraintExtension struct { // AddedKey describes an SSH key to be added to an Agent. type AddedKey struct { - // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or - // *ecdsa.PrivateKey, which will be inserted into the agent. + // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey, + // ed25519.PrivateKey or *ecdsa.PrivateKey, which will be inserted into the + // agent. PrivateKey interface{} // Certificate, if not nil, is communicated to the agent and will be // stored with the key. @@ -566,6 +567,17 @@ func (c *client) insertKey(s interface{}, comment string, constraints []byte) er Comments: comment, Constraints: constraints, }) + case ed25519.PrivateKey: + req = ssh.Marshal(ed25519KeyMsg{ + Type: ssh.KeyAlgoED25519, + Pub: []byte(k)[32:], + Priv: []byte(k), + Comments: comment, + Constraints: constraints, + }) + // This function originally supported only *ed25519.PrivateKey, however the + // general idiom is to pass ed25519.PrivateKey by value, not by pointer. + // We still support the pointer variant for backwards compatibility. case *ed25519.PrivateKey: req = ssh.Marshal(ed25519KeyMsg{ Type: ssh.KeyAlgoED25519, @@ -683,6 +695,18 @@ func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string Comments: comment, Constraints: constraints, }) + case ed25519.PrivateKey: + req = ssh.Marshal(ed25519CertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + Pub: []byte(k)[32:], + Priv: []byte(k), + Comments: comment, + Constraints: constraints, + }) + // This function originally supported only *ed25519.PrivateKey, however the + // general idiom is to pass ed25519.PrivateKey by value, not by pointer. + // We still support the pointer variant for backwards compatibility. case *ed25519.PrivateKey: req = ssh.Marshal(ed25519CertMsg{ Type: cert.Type(), diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 0f89aec1c7..916c840b69 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -414,8 +414,8 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { return nil } -// SignCert sets c.SignatureKey to the authority's public key and stores a -// Signature, by authority, in the certificate. +// SignCert signs the certificate with an authority, setting the Nonce, +// SignatureKey, and Signature fields. 
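The agent client changes above accept an ed25519.PrivateKey passed by value as well as by pointer. A minimal sketch of adding such a key to a running agent over SSH_AUTH_SOCK follows; the socket lookup, comment text, and error handling are illustrative assumptions and not part of the vendored change.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// Generate a throwaway ed25519 key for the example.
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// PrivateKey is an ed25519.PrivateKey by value; no pointer is needed.
	if err := agent.NewClient(conn).Add(agent.AddedKey{
		PrivateKey: priv,
		Comment:    "example ed25519 key",
	}); err != nil {
		log.Fatal(err)
	}
}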
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { c.Nonce = make([]byte, 32) if _, err := io.ReadFull(rand, c.Nonce); err != nil { diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index b0204ee59f..8bd6b3daff 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -119,7 +119,7 @@ var cipherModes = map[string]*cipherMode{ chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely // needed, it's possible to specify a custom Config to enable it. // You should expect that an active attacker can recover plaintext if // you do. diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 0590070e22..f3265655ee 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -36,7 +36,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { // during the authentication phase the client first attempts the "none" method // then any untried methods suggested by the server. - tried := make(map[string]bool) + var tried []string var lastMethods []string sessionID := c.transport.getSessionID() @@ -49,7 +49,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { // success return nil } else if ok == authFailure { - tried[auth.method()] = true + if m := auth.method(); !contains(tried, m) { + tried = append(tried, m) + } } if methods == nil { methods = lastMethods @@ -61,7 +63,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { findNext: for _, a := range config.Auth { candidateMethod := a.method() - if tried[candidateMethod] { + if contains(tried, candidateMethod) { continue } for _, meth := range methods { @@ -72,16 +74,16 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { } } } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) } -func keys(m map[string]bool) []string { - s := make([]string, 0, len(m)) - - for key := range m { - s = append(s, key) +func contains(list []string, e string) bool { + for _, s := range list { + if s == e { + return true + } } - return s + return false } // An AuthMethod represents an instance of an RFC 4252 authentication method. diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 6c3c648fc9..7eedb209fa 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -572,7 +572,7 @@ func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, e return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil } -func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { +func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ MinBits: dhGroupExchangeMinimumBits, @@ -677,7 +677,7 @@ func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshak // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. 
// // This is a minimal implementation to satisfy the automated tests. -func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 06f537c135..31f26349a0 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -1246,15 +1246,23 @@ func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { } key, iv := k[:32], k[32:] - if cipherName != "aes256-ctr" { - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q", cipherName, "aes256-ctr") - } c, err := aes.NewCipher(key) if err != nil { return nil, err } - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) + switch cipherName { + case "aes256-ctr": + ctr := cipher.NewCTR(c, iv) + ctr.XORKeyStream(privKeyBlock, privKeyBlock) + case "aes256-cbc": + if len(privKeyBlock)%c.BlockSize() != 0 { + return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") + } + cbc := cipher.NewCBCDecrypter(c, iv) + cbc.CryptBlocks(privKeyBlock, privKeyBlock) + default: + return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") + } return privKeyBlock, nil } diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go index f19016270e..9654c01869 100644 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -240,7 +240,7 @@ func (m *mux) onePacket() error { id := binary.BigEndian.Uint32(packet[1:]) ch := m.chanList.getChan(id) if ch == nil { - return fmt.Errorf("ssh: invalid channel %d", id) + return m.handleUnknownChannelPacket(id, packet) } return ch.handlePacket(packet) @@ -328,3 +328,24 @@ func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) } } + +func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + // RFC 4254 section 5.4 says unrecognized channel requests should + // receive a failure response. 
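With the keys.go change above, passphrase-protected OpenSSH private keys encrypted with aes256-cbc can be parsed in the same way as aes256-ctr keys. A short sketch, assuming a hypothetical key file name and passphrase:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := ioutil.ReadFile("id_ed25519") // hypothetical key file
	if err != nil {
		log.Fatal(err)
	}
	// Works for both aes256-ctr and (now) aes256-cbc encrypted OpenSSH keys.
	signer, err := ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte("example passphrase"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("parsed key of type %s", signer.PublicKey().Type())
}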
+ case *channelRequestMsg: + if msg.WantReply { + return m.sendMessage(channelRequestFailureMsg{ + PeersID: msg.PeersID, + }) + } + return nil + default: + return fmt.Errorf("ssh: invalid channel %d", id) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index d1b4fca3a9..2ffb97bfb8 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -113,6 +113,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal { } const ( + keyCtrlC = 3 keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' @@ -151,8 +152,12 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { switch b[0] { case 1: // ^A return keyHome, b[1:] + case 2: // ^B + return keyLeft, b[1:] case 5: // ^E return keyEnd, b[1:] + case 6: // ^F + return keyRight, b[1:] case 8: // ^H return keyBackspace, b[1:] case 11: // ^K @@ -738,6 +743,9 @@ func (t *Terminal) readLine() (line string, err error) { return "", io.EOF } } + if key == keyCtrlC { + return "", io.EOF + } if key == keyPasteStart { t.pasteActive = true if len(t.line) == 0 { diff --git a/vendor/golang.org/x/net/go.mod b/vendor/golang.org/x/net/go.mod index 325937b4a8..9d4b5ce9bc 100644 --- a/vendor/golang.org/x/net/go.mod +++ b/vendor/golang.org/x/net/go.mod @@ -3,7 +3,7 @@ module golang.org/x/net go 1.11 require ( - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd golang.org/x/text v0.3.0 ) diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index f4d9b5ece3..3a67636fe2 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis // dialCall is an in-flight Transport dial call to a host. type dialCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done res *ClientConn // valid after done is closed @@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) } type addConnCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done err error @@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { close(c.done) } -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - // p.mu must be held func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { for _, v := range p.conns[key] { diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index cea601fcdf..b51f0e0cf1 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -8,6 +8,8 @@ package http2 // flow is the flow control window's size. type flow struct { + _ incomparable + // n is the number of DATA bytes we're allowed to send. // A flow is kept both on a conn and a per-stream. 
n int32 diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index b412a96c50..a1ab2f0567 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { return nil } +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() + type node struct { + _ incomparable + // children is non-nil for internal nodes children *[256]*node diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 27cc893cc0..5571ccfd26 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -241,6 +241,7 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { + _ incomparable w io.Writer // immutable bw *bufio.Writer // non-nil when data is buffered } @@ -313,6 +314,7 @@ func bodyAllowedForStatus(status int) bool { } type httpError struct { + _ incomparable msg string timeout bool } @@ -376,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) { func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/') || v == "*" } + +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index bc9e41a1b7..345b7cd85d 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -761,6 +761,7 @@ func (sc *serverConn) readFrames() { // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. type frameWriteResult struct { + _ incomparable wr FrameWriteRequest // what was written (or attempted) err error // result of the writeFrame call } @@ -771,7 +772,7 @@ type frameWriteResult struct { // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} + sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err} } func (sc *serverConn) closeAllStreamsOnConnClose() { @@ -1161,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { if wr.write.staysWithinBuffer(sc.bw.Available()) { sc.writingFrameAsync = false err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) + sc.wroteFrame(frameWriteResult{wr: wr, err: err}) } else { sc.writingFrameAsync = true go sc.writeFrameAsync(wr) @@ -2057,7 +2058,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var trailer http.Header for _, v := range rp.header["Trailer"] { for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + key = http.CanonicalHeaderKey(textproto.TrimString(key)) switch key { case "Transfer-Encoding", "Trailer", "Content-Length": // Bogus. (copy of http1 rules) @@ -2275,6 +2276,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { // requestBody is the Handler's Request.Body type. // Read and Close may be called concurrently. 
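The incomparable type introduced above, and reused throughout the http2 changes in this diff, relies on the fact that arrays of func values are not comparable while a zero-length array carries no data. A small sketch of the effect, using a hypothetical probe struct that is not part of the vendored code:

package main

import (
	"fmt"
	"reflect"
)

// incomparable mirrors the vendored type: a zero-length array of a
// non-comparable element type, so embedding it removes comparability
// from the enclosing struct without storing anything.
type incomparable [0]func()

type probe struct {
	_ incomparable
	n int32
}

func main() {
	p := probe{n: 1}
	// q := probe{n: 1}
	// fmt.Println(p == q) // does not compile: struct containing [0]func() cannot be compared
	fmt.Println(reflect.TypeOf(p).Comparable()) // prints "false"
}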
type requestBody struct { + _ incomparable stream *stream conn *serverConn closed bool // for use by Close only diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 81778bec61..76a92e0ca6 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -108,6 +108,19 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // ReadIdleTimeout is the timeout after which a health check using ping + // frame will be carried out if no frame is received on the connection. + // Note that a ping response will be considered a received frame, so if + // there is no other traffic on the connection, the health check will + // be performed every ReadIdleTimeout interval. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to Ping is not received. + // Defaults to 15s. + PingTimeout time.Duration + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -131,6 +144,14 @@ func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } +func (t *Transport) pingTimeout() time.Duration { + if t.PingTimeout == 0 { + return 15 * time.Second + } + return t.PingTimeout + +} + // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. func ConfigureTransport(t1 *http.Transport) error { @@ -675,6 +696,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return cc, nil } +func (cc *ClientConn) healthCheck() { + pingTimeout := cc.t.pingTimeout() + // We don't need to periodically ping in the health check, because the readLoop of ClientConn will + // trigger the healthCheck again if there is no frame received. + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + defer cancel() + err := cc.Ping(ctx) + if err != nil { + cc.closeForLostPing() + cc.t.connPool().MarkDead(cc) + return + } +} + func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() @@ -846,14 +881,12 @@ func (cc *ClientConn) sendGoAway() error { return nil } -// Close closes the client connection immediately. - // -// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. -func (cc *ClientConn) Close() error { +// closes the client connection immediately. In-flight requests are interrupted. +// err is sent to streams. +func (cc *ClientConn) closeForError(err error) error { cc.mu.Lock() defer cc.cond.Broadcast() defer cc.mu.Unlock() - err := errors.New("http2: client connection force closed via ClientConn.Close") for id, cs := range cc.streams { select { case cs.resc <- resAndError{err: err}: @@ -866,6 +899,20 @@ func (cc *ClientConn) Close() error { return cc.tconn.Close() } +// Close closes the client connection immediately. + +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + err := errors.New("http2: client connection force closed via ClientConn.Close") + return cc.closeForError(err) +} + +// closes the client connection immediately. In-flight requests are interrupted.
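The new ReadIdleTimeout and PingTimeout fields drive the healthCheck path above: if no frame arrives within ReadIdleTimeout, the transport sends a PING and tears the connection down when no reply arrives within PingTimeout. A minimal configuration sketch follows; the timeout values and the URL are arbitrary examples, not recommendations from the vendored code.

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		// Send a PING frame if no frame has been received for 30s...
		ReadIdleTimeout: 30 * time.Second,
		// ...and close the connection if the PING is not answered within 10s.
		PingTimeout: 10 * time.Second,
	}
	client := &http.Client{Transport: t}

	resp, err := client.Get("https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}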
+func (cc *ClientConn) closeForLostPing() error { + err := errors.New("http2: client connection lost") + return cc.closeForError(err) +} + const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. @@ -916,7 +963,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { k = http.CanonicalHeaderKey(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} + return "", fmt.Errorf("invalid Trailer key %q", k) } keys = append(keys, k) } @@ -1394,13 +1441,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - // requires cc.mu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -1616,6 +1656,7 @@ func (cc *ClientConn) writeHeader(name, value string) { } type resAndError struct { + _ incomparable res *http.Response err error } @@ -1663,6 +1704,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { + _ incomparable cc *ClientConn closeWhenIdle bool } @@ -1742,8 +1784,17 @@ func (rl *clientConnReadLoop) run() error { rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse gotReply := false // ever saw a HEADERS reply gotSettings := false + readIdleTimeout := cc.t.ReadIdleTimeout + var t *time.Timer + if readIdleTimeout != 0 { + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) + defer t.Stop() + } for { f, err := cc.fr.ReadFrame() + if t != nil { + t.Reset(readIdleTimeout) + } if err != nil { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } @@ -1892,7 +1943,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } - header := make(http.Header) + regularFields := f.RegularFields() + strs := make([]string, len(regularFields)) + header := make(http.Header, len(regularFields)) res := &http.Response{ Proto: "HTTP/2.0", ProtoMajor: 2, @@ -1900,7 +1953,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra StatusCode: statusCode, Status: status + " " + http.StatusText(statusCode), } - for _, hf := range f.RegularFields() { + for _, hf := range regularFields { key := http.CanonicalHeaderKey(hf.Name) if key == "Trailer" { t := res.Trailer @@ -1912,7 +1965,18 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra t[http.CanonicalHeaderKey(v)] = nil }) } else { - header[key] = append(header[key], hf.Value) + vv := header[key] + if vv == nil && len(strs) > 0 { + // More than likely this will be a single-element key. + // Most headers aren't multi-valued. + // Set the capacity on strs[0] to 1, so any future append + // won't extend the slice into the other strings. 
+ vv, strs = strs[:1:1], strs[1:] + vv[0] = hf.Value + header[key] = vv + } else { + header[key] = append(vv, hf.Value) + } } } @@ -2466,6 +2530,7 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { // gzipReader wraps a response body so it can lazily // call gzip.NewReader on the first call to Read type gzipReader struct { + _ incomparable body io.ReadCloser // underlying Response.Body zr *gzip.Reader // lazily-initialized gzip reader zerr error // sticky error diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go index ed8da8deac..dcbb14ef35 100644 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -39,20 +39,25 @@ func (bigEndian) Uint64(b []byte) uint64 { uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 } -// hostByteOrder returns binary.LittleEndian on little-endian machines and -// binary.BigEndian on big-endian machines. +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. func hostByteOrder() byteOrder { switch runtime.GOARCH { case "386", "amd64", "amd64p32", + "alpha", "arm", "arm64", "mipsle", "mips64le", "mips64p32le", + "nios2", "ppc64le", - "riscv", "riscv64": + "riscv", "riscv64", + "sh": return littleEndian{} case "armbe", "arm64be", + "m68k", "mips", "mips64", "mips64p32", "ppc", "ppc64", "s390", "s390x", + "shbe", "sparc", "sparc64": return bigEndian{} } diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go similarity index 96% rename from vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go rename to vendor/golang.org/x/sys/cpu/cpu_aix.go index be60272247..da29896687 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix,ppc64 +// +build aix package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 9c87677aef..7bcb36c7bb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -10,8 +10,14 @@ const cacheLineSize = 64 func init() { switch runtime.GOOS { - case "android", "darwin": + case "android", "darwin", "netbsd": // Android and iOS don't seem to allow reading these registers. + // + // NetBSD: + // ID_AA64ISAR0_EL1 is a privileged register and cannot be read from EL0. + // It can be read via sysctl(3). Example for future implementers: + // https://nxr.netbsd.org/xref/src/usr.sbin/cpuctl/arch/aarch64.c + // // Fake the minimal features expected by // TestARM64minimalFeatures. ARM64.HasASIMD = true diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 0000000000..76fbe40b76 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not use a CGo method.
+ +// +build aix +// +build gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go new file mode 100644 index 0000000000..e07899b909 --- /dev/null +++ b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unsafeheader contains header declarations for the Go runtime's +// slice and string implementations. +// +// This package allows x/sys to use types equivalent to +// reflect.SliceHeader and reflect.StringHeader without introducing +// a dependency on the (relatively heavy) "reflect" package. +package unsafeheader + +import ( + "unsafe" +) + +// Slice is the runtime representation of a slice. +// It cannot be used safely or portably and its representation may change in a later release. +type Slice struct { + Data unsafe.Pointer + Len int + Cap int +} + +// String is the runtime representation of a string. +// It cannot be used safely or portably and its representation may change in a later release. +type String struct { + Data unsafe.Pointer + Len int +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index ab433ccfbb..579d2d7355 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -89,7 +89,7 @@ constants. Adding new syscall numbers is mostly done by running the build on a sufficiently new installation of the target OS (or updating the source checkouts for the -new build system). However, depending on the OS, you make need to update the +new build system). However, depending on the OS, you may need to update the parsing in mksysnum. ### mksyscall.go @@ -163,7 +163,7 @@ The merge is performed in the following steps: ## Generated files -### `zerror_${GOOS}_${GOARCH}.go` +### `zerrors_${GOOS}_${GOARCH}.go` A file containing all of the system's generated error numbers, error strings, signal numbers, and constants. Generated by `mkerrors.sh` (see above). diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go index f911617be9..dc0befee37 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -6,7 +6,11 @@ package unix -import "unsafe" +import ( + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) //sys closedir(dir uintptr) (err error) //sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) @@ -71,6 +75,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { cnt++ continue } + reclen := int(entry.Reclen) if reclen > len(buf) { // Not enough room. Return for now. @@ -79,13 +84,15 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // restarting is O(n^2) in the length of the directory. Oh well. break } + // Copy entry into return buffer. 
- s := struct { - ptr unsafe.Pointer - siz int - cap int - }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen} - copy(buf, *(*[]byte)(unsafe.Pointer(&s))) + var s []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + hdr.Data = unsafe.Pointer(&entry) + hdr.Cap = reclen + hdr.Len = reclen + copy(buf, s) + buf = buf[reclen:] n += reclen cnt++ diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 9a5a6ee544..0cf31acf02 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -423,6 +423,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tp *Timeval) (err error) //sysnb Getuid() (uid int) //sysnb Issetugid() (tainted bool) //sys Kqueue() (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 707ba4f59a..2724e3a512 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index fdbfb5911a..ce2e0d2497 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index f8bc4cfb1f..fc17a3f232 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. 
- sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 5ede3ac316..1e91ddf325 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -22,17 +22,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index bbe1abbcee..e50e4cb276 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -97,6 +97,12 @@ func IoctlSetRTCTime(fd int, value *RTCTime) error { return err } +func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { + err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -109,6 +115,12 @@ func IoctlGetRTCTime(fd int) (*RTCTime, error) { return &value, err } +func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { + var value RTCWkAlrm + err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) + return &value, err +} + //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { @@ -1633,6 +1645,15 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys DeleteModule(name string, flags int) (err error) //sys Dup(oldfd int) (fd int, err error) + +func Dup2(oldfd, newfd int) error { + // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. 
+ if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { + return Dup3(oldfd, newfd, 0) + } + return dup2(oldfd, newfd) +} + //sys Dup3(oldfd int, newfd int, flags int) (err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) @@ -1757,6 +1778,9 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) +//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) +//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) +//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) @@ -1926,6 +1950,20 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { return int(n), nil } +func isGroupMember(gid int) bool { + groups, err := Getgroups() + if err != nil { + return false + } + + for _, g := range groups { + if g == gid { + return true + } + } + return false +} + //sys faccessat(dirfd int, path string, mode uint32) (err error) func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -1983,7 +2021,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { gid = Getgid() } - if uint32(gid) == st.Gid { + if uint32(gid) == st.Gid || isGroupMember(gid) { fmode = (st.Mode >> 3) & 7 } else { fmode = st.Mode & 7 @@ -2178,7 +2216,6 @@ func Klogset(typ int, arg int) (err error) { // TimerGetoverrun // TimerGettime // TimerSettime -// Timerfd // Tkill (obsolete) // Tuxcall // Umount2 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index a8374b67cf..048d18e3c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -49,7 +49,7 @@ func Pipe2(p []int, flags int) (err error) { // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). 
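The Dup2 wrapper above falls back to Dup3 on android, arm64, and riscv64, where plain dup2 is blocked or absent, so callers can keep using a single name. A Linux-only sketch that redirects stderr through it follows; the log file path is an assumption, not part of the vendored code.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp/example.log", unix.O_WRONLY|unix.O_CREAT|unix.O_APPEND, 0644)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer unix.Close(fd)

	// Make file descriptor 2 (stderr) refer to the log file. On arm64,
	// riscv64, and Android this transparently uses dup3 under the hood.
	if err := unix.Dup2(fd, int(os.Stderr.Fd())); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Fprintln(os.Stderr, "this line now goes to /tmp/example.log")
}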
-//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 8ed1d546f0..72efe86ed4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -6,7 +6,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 99ae613733..e1913e2c93 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -80,7 +80,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 807a0b20c3..c6de6b9134 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -25,7 +25,7 @@ func EpollCreate(size int) (fd int, err error) { //sysnb Getegid() (egid int) //sysnb Geteuid() (euid int) //sysnb Getgid() (gid int) -//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 @@ -47,7 +47,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -168,6 +168,24 @@ func Pipe2(p []int, flags int) (err error) { return } +// Getrlimit prefers the prlimit64 system call. See issue 38604. +func Getrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + return getrlimit(resource, rlim) +} + +// Setrlimit prefers the prlimit64 system call. See issue 38604. 
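// Illustrative note (a sketch, not part of the vendored patch): the Getrlimit wrapper
// above and the Setrlimit wrapper that follows now try the prlimit64-based prlimit()
// first on linux/arm64 and fall back to the raw getrlimit/setrlimit syscalls only when
// the kernel returns ENOSYS (Go issue 38604). Callers are unaffected, for example:
//
//	package main
//
//	import (
//		"fmt"
//
//		"golang.org/x/sys/unix"
//	)
//
//	func main() {
//		var lim unix.Rlimit
//		if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
//			fmt.Println("getrlimit:", err)
//			return
//		}
//		fmt.Printf("open files: cur=%d max=%d\n", lim.Cur, lim.Max)
//	}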
+func Setrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + return setrlimit(resource, rlim) +} + func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } @@ -192,9 +210,9 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. +func dup2(oldfd int, newfd int) error func Pause() error { _, err := ppoll(nil, 0, nil, nil) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index af77e6e25e..f0287476cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index e286c6ba31..c11328111d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -14,7 +14,7 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index ca0345aabf..349374409b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index abdabbac3f..b0b1505565 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -191,10 +191,6 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} - func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err @@ -228,3 +224,7 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. 
+func dup2(oldfd int, newfd int) error diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 533e9305e7..2363f74991 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -10,7 +10,7 @@ import ( "unsafe" ) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index d890a227bf..d389f1518f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -8,7 +8,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8f710d0140..400ba9fbc9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -12,6 +12,8 @@ import ( "sync" "syscall" "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -113,15 +115,12 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, errno } - // Slice memory layout - var sl = struct { - addr uintptr - len int - cap int - }{addr, length, length} - - // Use unsafe to turn sl into a []byte. - b := *(*[]byte)(unsafe.Pointer(&sl)) + // Use unsafe to convert addr into a []byte. + var b []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) + hdr.Data = unsafe.Pointer(addr) + hdr.Cap = length + hdr.Len = length // Register mapping in m and return it. 
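// Illustrative note (a sketch, not part of the vendored patch): the Mmap hunk above
// replaces the hand-rolled {addr, len, cap} header struct with
// golang.org/x/sys/internal/unsafeheader.Slice, whose Data field is an unsafe.Pointer,
// so the mapped address stays in a pointer-typed field rather than a bare uintptr.
// The same idiom in isolation, where addr and length stand for the values the mmap
// call just returned:
//
//	var b []byte
//	hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
//	hdr.Data = unsafe.Pointer(addr) // addr is the uintptr returned by mmap
//	hdr.Cap = length
//	hdr.Len = length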
p := &b[cap(b)-1] diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 84c599c524..f8bd50c11b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -160,77 +160,28 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 BPF_ARSH = 0xc0 BPF_B = 0x10 BPF_BUILD_ID_SIZE = 0x14 BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 BPF_DW = 0x18 BPF_END = 0xd0 - BPF_EXIST = 0x2 BPF_EXIT = 0x90 - BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 - BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 - BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CLONE = 0x200 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_MMAPABLE = 0x400 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_REPLACE = 0x4 BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -266,7 +217,6 @@ const ( BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 @@ -274,12 +224,6 @@ const ( BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 @@ -377,18 +321,21 @@ const ( CLOCK_TXINT = 0x3 CLONE_ARGS_SIZE_VER0 = 0x40 CLONE_ARGS_SIZE_VER1 = 0x50 + CLONE_ARGS_SIZE_VER2 = 0x58 CLONE_CHILD_CLEARTID = 0x200000 CLONE_CHILD_SETTID = 0x1000000 CLONE_CLEAR_SIGHAND = 0x100000000 CLONE_DETACHED = 0x400000 CLONE_FILES = 0x400 CLONE_FS = 0x200 + CLONE_INTO_CGROUP = 0x200000000 CLONE_IO = 0x80000000 CLONE_NEWCGROUP = 0x2000000 CLONE_NEWIPC = 0x8000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 CLONE_NEWPID = 0x20000000 + CLONE_NEWTIME = 0x80 CLONE_NEWUSER = 0x10000000 CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 @@ -596,7 +543,9 @@ const ( FAN_DELETE = 0x200 FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 + FAN_DIR_MODIFY = 0x80000 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 
FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 @@ -671,6 +620,7 @@ const ( FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617 FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 + FS_IOC_MEASURE_VERITY = 0xc0046686 FS_IOC_REMOVE_ENCRYPTION_KEY = 0xc0406618 FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS = 0xc0406619 FS_KEY_DESCRIPTOR_SIZE = 0x8 @@ -683,6 +633,9 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0xf + FS_VERITY_FL = 0x100000 + FS_VERITY_HASH_ALG_SHA256 = 0x1 + FS_VERITY_HASH_ALG_SHA512 = 0x2 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -733,6 +686,7 @@ const ( GENL_NAMSIZ = 0x10 GENL_START_ALLOC = 0x13 GENL_UNS_ADMIN_PERM = 0x10 + GRND_INSECURE = 0x4 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HDIO_DRIVE_CMD = 0x31f @@ -1483,6 +1437,7 @@ const ( PR_GET_FPEMU = 0x9 PR_GET_FPEXC = 0xb PR_GET_FP_MODE = 0x2e + PR_GET_IO_FLUSHER = 0x3a PR_GET_KEEPCAPS = 0x7 PR_GET_NAME = 0x10 PR_GET_NO_NEW_PRIVS = 0x27 @@ -1518,6 +1473,7 @@ const ( PR_SET_FPEMU = 0xa PR_SET_FPEXC = 0xc PR_SET_FP_MODE = 0x2d + PR_SET_IO_FLUSHER = 0x39 PR_SET_KEEPCAPS = 0x8 PR_SET_MM = 0x23 PR_SET_MM_ARG_END = 0x9 @@ -1746,12 +1702,15 @@ const ( RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 RTM_DELTFILTER = 0x2d + RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 + RTM_F_OFFLOAD = 0x4000 RTM_F_PREFIX = 0x800 + RTM_F_TRAP = 0x8000 RTM_GETACTION = 0x32 RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a @@ -1773,7 +1732,8 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6f + RTM_GETVLAN = 0x72 + RTM_MAX = 0x73 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1788,6 +1748,7 @@ const ( RTM_NEWNETCONF = 0x50 RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 + RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -1795,8 +1756,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x18 - RTM_NR_MSGTYPES = 0x60 + RTM_NR_FAMILIES = 0x19 + RTM_NR_MSGTYPES = 0x64 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2086,7 +2047,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 + TASKSTATS_VERSION = 0xa TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -2094,8 +2055,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2151,6 +2110,8 @@ const ( TCP_USER_TIMEOUT = 0x12 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 + TFD_TIMER_ABSTIME = 0x1 + TFD_TIMER_CANCEL_ON_SET = 0x2 TIMER_ABSTIME = 0x1 TIOCM_DTR = 0x2 TIOCM_LE = 0x1 @@ -2267,7 +2228,7 @@ const ( VMADDR_CID_ANY = 0xffffffff VMADDR_CID_HOST = 0x2 VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 + VMADDR_CID_LOCAL = 0x1 VMADDR_PORT_ANY = 0xffffffff VM_SOCKETS_INVALID_VERSION = 0xffffffff VQUIT = 0x1 @@ -2368,8 +2329,9 @@ const ( XDP_COPY = 0x2 XDP_FLAGS_DRV_MODE = 0x4 XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MASK = 0x1f XDP_FLAGS_MODES = 0xe + XDP_FLAGS_REPLACE = 0x10 XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 @@ -2394,6 +2356,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 Z3FOLD_MAGIC = 0x33 + ZONEFS_MAGIC = 0x5a4f4653 ZSMALLOC_MAGIC = 0x58295829 ) diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 0876cf92ff..8d207b041e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -73,6 +73,9 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80046601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -340,6 +343,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index d5be2e8377..c4bf9cb80f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -73,6 +73,9 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -341,6 +344,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index fbeef83252..0cab0522e6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -72,6 +72,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80046601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -347,6 +350,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 06daa50ebd..370d0a7f59 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -75,6 +75,9 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FPSIMD_MAGIC = 0x46508001 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -334,6 +337,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 7c866b8f5b..fbf2f3174e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -72,6 +72,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +340,8 @@ const ( TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c42966d19c..25e74b30a9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -72,6 +72,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +340,8 @@ const ( TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index a5b2b42739..4ecc0bca34 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -72,6 +72,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +340,8 @@ const ( TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7f91881b81..dfb8f88a7e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -72,6 +72,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +340,8 @@ const ( TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 63df35597e..72d8dad5b8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -72,6 +72,9 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -391,6 +394,8 @@ const ( TCSETSF = 0x802c7416 TCSETSW = 0x802c7415 TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 7ab68f7c8a..ca0e7b5262 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -72,6 +72,9 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -391,6 +394,8 @@ const ( TCSETSF = 0x802c7416 TCSETSW = 0x802c7415 TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index f99cf1b9e0..147511a974 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -72,6 +72,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -328,6 +331,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 613ee237e3..517349dafa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -72,6 +72,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -401,6 +404,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1f7a68d5cc..094822465b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -76,6 +76,9 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -390,6 +393,8 @@ const ( TCSETSW = 0x8024540a TCSETSW2 = 0x802c540e TCXONC = 0x20005406 + TFD_CLOEXEC = 0x400000 + TFD_NONBLOCK = 0x4000 TIOCCBRK = 0x2000747a TIOCCONS = 0x20007424 TIOCEXCL = 0x2000740d diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index c1cc0a415f..23e94d3663 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index a3fc490041..e2ffb3bed3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index f8e5c37c5c..102561730a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 
0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 50d6437e6b..c67e336e2a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index cea04e041c..d34e6df2fe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1682,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 
63103950ca..b759757a77 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2342,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index 8c3bb3a25d..8d39a09f72 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1682,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index a8709f72dd..b288612600 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), 
uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2342,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index fd2dae8e57..df217825f0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1450,6 +1450,37 @@ func Sysinfo(info *Sysinfo_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func TimerfdCreate(clockid int, flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_TIMERFD_CREATE, uintptr(clockid), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdGettime(fd int, currValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall(SYS_TIMERFD_GETTIME, uintptr(fd), uintptr(unsafe.Pointer(currValue)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall6(SYS_TIMERFD_SETTIME, uintptr(fd), uintptr(flags), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index ba63af7b08..19ebd3ff75 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -55,7 +55,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index f64adef415..5c562182a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go 
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index ac19523e88..dc69d99c61 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -234,7 +234,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index f0d2890b16..1b897dee05 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -151,7 +151,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { +func getrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) @@ -307,7 +307,7 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { +func setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index aecbbca754..49186843ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 424fb7fb60..9171d3bd2a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 28c7239cf6..82286f04f9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -45,7 
+45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 84596b300a..15920621c4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index de022639d6..73a42e2ccb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 888f21d37a..6b85595366 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 9bc353f0c4..d7032ab1e4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 854e816d67..bcbbdd906e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -72,7 +72,7 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go 
b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 37dcc74c2d..102f1ab475 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,4 +1,4 @@ -// mksysctl_openbsd.pl +// go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. // +build 386,openbsd @@ -30,6 +30,7 @@ var sysctlMib = []mibentry{ {"hw.model", []_C_int{6, 2}}, {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, {"hw.physmem", []_C_int{6, 19}}, {"hw.product", []_C_int{6, 15}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index fe6caa6eb7..4866fced8a 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -31,6 +31,7 @@ var sysctlMib = []mibentry{ {"hw.model", []_C_int{6, 2}}, {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 6eb8c0b086..d3801eb24b 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -30,6 +30,7 @@ var sysctlMib = []mibentry{ {"hw.model", []_C_int{6, 2}}, {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, {"hw.physmem", []_C_int{6, 19}}, {"hw.product", []_C_int{6, 15}}, diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 7aae554f21..54559a8956 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -431,4 +431,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 7968439a92..054a741b7f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -353,4 +353,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3c663c69d4..307f2ba12e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -395,4 +395,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1f3b4d150f..e9404dd545 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -298,4 +298,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 00da3de907..68bb6d29b8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -416,4 +416,6 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_OPENAT2 = 4437 + SYS_PIDFD_GETFD = 4438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index d404fbd4d4..4e5251185f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -346,4 +346,6 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index bfbf242f33..4d9aa3003b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -346,4 +346,6 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 3826f497ad..64af0707d5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -416,4 +416,6 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_OPENAT2 = 4437 + SYS_PIDFD_GETFD = 4438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 52e3da6490..cc3c067ba3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -395,4 +395,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 6141f90a82..4050ff9836 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -395,4 +395,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 4f7261a884..529abb6a7f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -297,4 +297,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f47014ac05..2766500109 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -360,4 +360,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index dd78abb0d6..4dc82bb249 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -374,4 +374,6 @@ const ( SYS_FSMOUNT = 432 SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go 
b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 6f79227d74..b91c2ae0f0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -125,9 +125,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -150,9 +150,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index cb5e06c605..27d67ac8f5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -18,6 +18,11 @@ type ( _C_long_long int64 ) +type ItimerSpec struct { + Interval Timespec + Value Timespec +} + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -114,7 +119,8 @@ type FscryptKeySpecifier struct { type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 - _ [9]uint32 + Key_id uint32 + _ [8]uint32 } type FscryptRemoveKeyArg struct { @@ -479,7 +485,7 @@ const ( IFLA_NEW_IFINDEX = 0x31 IFLA_MIN_MTU = 0x32 IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x35 + IFLA_MAX = 0x36 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 IFLA_INFO_XSTATS = 0x3 @@ -1865,175 +1871,249 @@ const ( ) const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_FREEZE = 0x16 - BPF_BTF_GET_NEXT_ID = 0x17 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_MAP_TYPE_SK_STORAGE = 0x18 - BPF_MAP_TYPE_DEVMAP_HASH = 0x19 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - 
BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17 - BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18 - BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19 - BPF_PROG_TYPE_TRACING = 0x1a - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_CGROUP_SYSCTL = 0x12 - BPF_CGROUP_UDP4_RECVMSG = 0x13 - BPF_CGROUP_UDP6_RECVMSG = 0x14 - BPF_CGROUP_GETSOCKOPT = 0x15 - BPF_CGROUP_SETSOCKOPT = 0x16 - BPF_TRACE_RAW_TP = 0x17 - BPF_TRACE_FENTRY = 0x18 - BPF_TRACE_FEXIT = 0x19 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_ADJ_ROOM_MAC = 0x1 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_LWT_ENCAP_IP = 0x2 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_LWT_REROUTE = 0x80 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_SOCK_OPS_RTT_CB = 0xc - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + 
BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_FREEZE = 0x16 + BPF_BTF_GET_NEXT_ID = 0x17 + BPF_MAP_LOOKUP_BATCH = 0x18 + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 0x19 + BPF_MAP_UPDATE_BATCH = 0x1a + BPF_MAP_DELETE_BATCH = 0x1b + BPF_LINK_CREATE = 0x1c + BPF_LINK_UPDATE = 0x1d + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_MAP_TYPE_SK_STORAGE = 0x18 + BPF_MAP_TYPE_DEVMAP_HASH = 0x19 + BPF_MAP_TYPE_STRUCT_OPS = 0x1a + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18 + BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19 + BPF_PROG_TYPE_TRACING = 0x1a + BPF_PROG_TYPE_STRUCT_OPS = 0x1b + BPF_PROG_TYPE_EXT = 0x1c + BPF_PROG_TYPE_LSM = 0x1d + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_CGROUP_SYSCTL = 0x12 + BPF_CGROUP_UDP4_RECVMSG = 0x13 + BPF_CGROUP_UDP6_RECVMSG = 0x14 + BPF_CGROUP_GETSOCKOPT = 0x15 + BPF_CGROUP_SETSOCKOPT = 0x16 + BPF_TRACE_RAW_TP = 0x17 + BPF_TRACE_FENTRY = 0x18 + BPF_TRACE_FEXIT = 0x19 + BPF_MODIFY_RETURN = 0x1a + BPF_LSM_MAC = 0x1b + BPF_ANY = 0x0 + BPF_NOEXIST = 0x1 + BPF_EXIST = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NUMA_NODE = 0x4 + BPF_F_RDONLY = 0x8 + BPF_F_WRONLY = 0x10 + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_ZERO_SEED = 0x40 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_WRONLY_PROG = 0x100 + 
BPF_F_CLONE = 0x200 + BPF_F_MMAPABLE = 0x400 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_INGRESS = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_USER_STACK = 0x100 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1 + BPF_ADJ_ROOM_NET = 0x0 + BPF_ADJ_ROOM_MAC = 0x1 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_LWT_ENCAP_IP = 0x2 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_LWT_REROUTE = 0x80 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_SOCK_OPS_RTT_CB = 0xc + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 + BPF_FIB_LOOKUP_DIRECT = 0x1 + BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 ) const ( @@ -2199,7 +2279,7 @@ const ( DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20 DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21 DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22 - DEVLINK_CMD_MAX = 0x44 + DEVLINK_CMD_MAX = 0x48 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -2279,7 +2359,7 @@ const ( DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c DEVLINK_ATTR_PAD = 
0x3d DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e - DEVLINK_ATTR_MAX = 0x8c + DEVLINK_ATTR_MAX = 0x90 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -2291,3 +2371,49 @@ const ( DEVLINK_DPIPE_HEADER_IPV4 = 0x1 DEVLINK_DPIPE_HEADER_IPV6 = 0x2 ) + +type FsverityDigest struct { + Algorithm uint16 + Size uint16 +} + +type FsverityEnableArg struct { + Version uint32 + Hash_algorithm uint32 + Block_size uint32 + Salt_size uint32 + Salt_ptr uint64 + Sig_size uint32 + _ uint32 + Sig_ptr uint64 + _ [11]uint64 +} + +type Nhmsg struct { + Family uint8 + Scope uint8 + Protocol uint8 + Resvd uint8 + Flags uint32 +} + +type NexthopGrp struct { + Id uint32 + Weight uint8 + Resvd1 uint8 + Resvd2 uint16 +} + +const ( + NHA_UNSPEC = 0x0 + NHA_ID = 0x1 + NHA_GROUP = 0x2 + NHA_GROUP_TYPE = 0x3 + NHA_BLACKHOLE = 0x4 + NHA_OIF = 0x5 + NHA_GATEWAY = 0x6 + NHA_ENCAP_TYPE = 0x7 + NHA_ENCAP = 0x8 + NHA_GROUPS = 0x9 + NHA_MASTER = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fc6b3fb5c4..761b67c864 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -287,6 +287,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 26c30b84d0..201fb3482d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -298,6 +298,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 814d42d543..8051b56108 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -276,6 +276,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index d9664c7135..a936f21692 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -277,6 +277,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 0d721454f5..aaca03dd7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -281,6 +281,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ef697684d1..2e7f3b8ca4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -280,6 +280,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 
+ Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 485fda70be..16add5a257 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -280,6 +280,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 569477eef8..4ed2c8e54c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -281,6 +281,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 602d8b4eed..7415190997 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -287,6 +287,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 6db9a7b737..046c2debd4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -287,6 +287,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 52b5348c2e..0f2f61a6ad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -305,6 +305,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index a111387b3a..cca1b6be27 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -300,6 +300,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 8153af1818..33a73bf183 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -282,6 +282,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index d777113415..82076fb74f 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -104,6 +104,35 @@ func (d *DLL) MustFindProc(name string) *Proc { return p } +// FindProcByOrdinal searches DLL d for procedure by ordinal and returns *Proc +// if found. 
It returns an error if search fails. +func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) { + a, e := GetProcAddressByOrdinal(d.Handle, ordinal) + name := "#" + itoa(int(ordinal)) + if e != nil { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails. +func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc { + p, e := d.FindProcByOrdinal(ordinal) + if e != nil { + panic(e) + } + return p +} + // Release unloads DLL d from memory. func (d *DLL) Release() (err error) { return FreeLibrary(d.Handle) diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index f482a9fab3..92ac05ff4e 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -8,7 +8,6 @@ package windows import ( "syscall" - "unicode/utf16" "unsafe" ) @@ -40,17 +39,11 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { defer DestroyEnvironmentBlock(block) blockp := uintptr(unsafe.Pointer(block)) for { - entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:] - for i, v := range entry { - if v == 0 { - entry = entry[:i] - break - } - } + entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) if len(entry) == 0 { break } - env = append(env, string(utf16.Decode(entry))) + env = append(env, entry) blockp += 2 * (uintptr(len(entry)) + 1) } return env, nil diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go index f80a4204f0..e409d76f0f 100644 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -23,4 +23,9 @@ const ( PAGE_EXECUTE_READ = 0x20 PAGE_EXECUTE_READWRITE = 0x40 PAGE_EXECUTE_WRITECOPY = 0x80 + + QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 + QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 + QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008 + QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004 ) diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 4b6eff1868..9e3c44a855 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -7,6 +7,8 @@ package windows import ( "syscall" "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) const ( @@ -1229,7 +1231,7 @@ func (sd *SECURITY_DESCRIPTOR) String() string { return "" } defer LocalFree(Handle(unsafe.Pointer(sddl))) - return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(sddl))[:]) + return UTF16PtrToString(sddl) } // ToAbsolute converts a self-relative security descriptor into an absolute one. 
@@ -1307,9 +1309,17 @@ func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURIT } func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdBytes := make([]byte, selfRelativeSD.Length()) - copy(sdBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(selfRelativeSD))[:len(sdBytes)]) - return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&sdBytes[0])) + sdLen := (int)(selfRelativeSD.Length()) + + var src []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) + h.Data = unsafe.Pointer(selfRelativeSD) + h.Len = sdLen + h.Cap = sdLen + + dst := make([]byte, sdLen) + copy(dst, src) + return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } // SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a @@ -1391,6 +1401,6 @@ func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL } defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) aclBytes := make([]byte, winHeapACL.aclSize) - copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes)]) + copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes):len(aclBytes)]) return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil } diff --git a/vendor/golang.org/x/sys/windows/svc/security.go b/vendor/golang.org/x/sys/windows/svc/security.go index 6fbc9236ed..65025998da 100644 --- a/vendor/golang.org/x/sys/windows/svc/security.go +++ b/vendor/golang.org/x/sys/windows/svc/security.go @@ -7,8 +7,6 @@ package svc import ( - "unsafe" - "golang.org/x/sys/windows" ) @@ -48,9 +46,8 @@ func IsAnInteractiveSession() (bool, error) { if err != nil { return false, err } - p := unsafe.Pointer(&gs.Groups[0]) - groups := (*[2 << 20]windows.SIDAndAttributes)(p)[:gs.GroupCount] - for _, g := range groups { + + for _, g := range gs.AllGroups() { if windows.EqualSid(g.Sid, interSid) { return true, nil } diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go index ee3d6965a1..bae818dddc 100644 --- a/vendor/golang.org/x/sys/windows/svc/service.go +++ b/vendor/golang.org/x/sys/windows/svc/service.go @@ -14,6 +14,7 @@ import ( "syscall" "unsafe" + "golang.org/x/sys/internal/unsafeheader" "golang.org/x/sys/windows" ) @@ -224,10 +225,16 @@ const ( func (s *service) run() { s.goWaits.Wait() s.h = windows.Handle(ssHandle) - argv := (*[100]*int16)(unsafe.Pointer(sArgv))[:sArgc] + + var argv []*uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&argv)) + hdr.Data = unsafe.Pointer(sArgv) + hdr.Len = int(sArgc) + hdr.Cap = int(sArgc) + args := make([]string, len(argv)) for i, a := range argv { - args[i] = syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(a))[:]) + args[i] = windows.UTF16PtrToString(a) } cmdsToHandler := make(chan ChangeRequest) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 053d664d0b..62cf70e9f6 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -13,6 +13,8 @@ import ( "time" "unicode/utf16" "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) type Handle uintptr @@ -117,6 +119,32 @@ func UTF16PtrFromString(s string) (*uint16, error) { return &a[0], nil } +// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string. +// If the pointer is nil, this returns the empty string. 
This assumes that the UTF-16 sequence is terminated +// at a zero word; if the zero word is not present, the program may crash. +func UTF16PtrToString(p *uint16) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. + n := 0 + for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) + } + + var s []uint16 + h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + h.Data = unsafe.Pointer(p) + h.Len = n + h.Cap = n + + return string(utf16.Decode(s)) +} + func Getpagesize() int { return 4096 } // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. @@ -280,6 +308,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetProcessId(process Handle) (id uint32, err error) //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost +//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) +//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -1181,7 +1211,12 @@ type IPv6Mreq struct { Interface uint32 } -func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS } +func GetsockoptInt(fd Handle, level, opt int) (int, error) { + v := int32(0) + l := int32(unsafe.Sizeof(v)) + err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l) + return int(v), err +} func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} @@ -1378,7 +1413,7 @@ func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, e return "", err } defer CoTaskMemFree(unsafe.Pointer(p)) - return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil + return UTF16PtrToString(p), nil } // RtlGetVersion returns the version of the underlying operating system, ignoring diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 2aa4fa642a..8a562feed0 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -217,6 +217,8 @@ var ( procGetProcessId = modkernel32.NewProc("GetProcessId") procOpenThread = modkernel32.NewProc("OpenThread") procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") + procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") @@ -2414,6 +2416,23 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { return } +func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { + syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), 
uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + return +} + +func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 {