vendor: github.com/prometheus/procfs v0.15.1

full diff: https://github.com/prometheus/procfs/compare/v0.12.0...v0.15.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit caa5d15e98)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn, 2024-07-16 18:46:30 +02:00
parent 2794ebd599
commit 674f8d2979
37 changed files with 496 additions and 200 deletions

@@ -81,7 +81,7 @@ require (
 github.com/prometheus/client_golang v1.17.0 // indirect
 github.com/prometheus/client_model v0.5.0 // indirect
 github.com/prometheus/common v0.44.0 // indirect
-github.com/prometheus/procfs v0.12.0 // indirect
+github.com/prometheus/procfs v0.15.1 // indirect
 github.com/rivo/uniseg v0.2.0 // indirect
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect

@@ -236,8 +236,8 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=

@@ -1,9 +1,16 @@
 ---
 linters:
 enable:
+- errcheck
 - godot
+- gosimple
+- govet
+- ineffassign
 - misspell
 - revive
+- staticcheck
+- testifylint
+- unused
 linter-settings:
 godot:

@@ -1,2 +1,3 @@
 * Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
-* Paul Gier <pgier@redhat.com> @pgier
+* Paul Gier <paulgier@gmail.com> @pgier
+* Ben Kochie <superq@gmail.com> @SuperQ

@@ -49,23 +49,23 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum > /dev/null),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
 GOTEST_DIR := test-results
 GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
 endif
-PROMU_VERSION ?= 0.15.0
+PROMU_VERSION ?= 0.17.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.54.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+GOLANGCI_LINT_VERSION ?= v1.59.0
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
-ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
 # If we're in CI and there is an Actions file, that means the linter
 # is being run in Actions, so we don't need to run it here.
 ifneq (,$(SKIP_GOLANGCI_LINT))
@@ -169,16 +169,20 @@ common-vet:
 common-lint: $(GOLANGCI_LINT)
 ifdef GOLANGCI_LINT
 @echo ">> running golangci-lint"
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
-$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
 $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 endif
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+@echo ">> running golangci-lint fix"
+$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
 .PHONY: common-yamllint
 common-yamllint:
 @echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint > /dev/null))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
 @echo "yamllint not installed so skipping"
 else
 yamllint .
@@ -204,6 +208,10 @@ common-tarball: promu
 @echo ">> building release tarball"
 $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:

@@ -55,7 +55,7 @@ type ARPEntry struct {
 func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
 data, err := os.ReadFile(fs.proc.Path("net/arp"))
 if err != nil {
-return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
+return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
 }
 return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
 } else if width == expectedDataWidth {
 entry, err := parseARPEntry(columns)
 if err != nil {
-return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
+return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
 }
 entries = append(entries, entry)
 } else {
-return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
+return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
 }
 }

@@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
 return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
 }
-node := strings.TrimRight(parts[1], ",")
-zone := strings.TrimRight(parts[3], ",")
+node := strings.TrimSuffix(parts[1], ",")
+zone := strings.TrimSuffix(parts[3], ",")
 arraySize := len(parts[4:])
 if bucketCount == -1 {
@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
 for i := 0; i < arraySize; i++ {
 sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
 if err != nil {
-return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
+return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
 }
 }

@@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
 firstLine := firstNonEmptyLine(scanner)
 match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
 if !match || !strings.Contains(firstLine, ":") {
-return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
 }
 field := strings.SplitN(firstLine, ": ", 2)
@@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
 // find the first "processor" line
 firstLine := firstNonEmptyLine(scanner)
 if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
-return nil, errors.New("invalid cpuinfo file: " + firstLine)
+return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
 }
 field := strings.SplitN(firstLine, ": ", 2)
 cpuinfo := []CPUInfo{}

@@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
 path := fs.proc.Path("crypto")
 b, err := util.ReadFileNoStat(path)
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
+return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
 }
 crypto, err := parseCrypto(bytes.NewReader(b))
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
+return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
 }
 return crypto, nil
@@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
 kv := strings.Split(text, ":")
 if len(kv) != 2 {
-return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
+return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
 }
 k := strings.TrimSpace(kv[0])

@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
 m, err := parseFscacheinfo(bytes.NewReader(b))
 if err != nil {
-return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
+return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
 }
 return *m, nil
@@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
 func setFSCacheFields(fields []string, setFields ...*uint64) error {
 var err error
 if len(fields) < len(setFields) {
-return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
+return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
 }
 for i := range setFields {

@@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
 case 46:
 ip = net.ParseIP(s[1:40])
 if ip == nil {
-return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
+return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
 }
 default:
-return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
+return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
 }
 portString := s[len(s)-4:]
 if len(portString) != 4 {
 return nil, 0,
-fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
+fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
 }
 port, err := strconv.ParseUint(portString, 16, 16)
 if err != nil {

@@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
 for i, load := range parts[0:3] {
 loads[i], err = strconv.ParseFloat(load, 64)
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
+return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
 }
 }
 return &LoadAvg{

@@ -23,7 +23,7 @@ import (
 var (
 statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
-recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
 recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
 recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
 recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
@@ -50,6 +50,8 @@ type MDStat struct {
 BlocksTotal int64
 // Number of blocks on the device that are in sync.
 BlocksSynced int64
+// Number of blocks on the device that need to be synced.
+BlocksToBeSynced int64
 // progress percentage of current sync
 BlocksSyncedPct float64
 // estimated finishing time for current sync (in minutes)
@@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
 }
 mdstat, err := parseMDStat(data)
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
+return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
 }
 return mdstat, nil
 }
@@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 deviceFields := strings.Fields(line)
 if len(deviceFields) < 3 {
-return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
+return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
 }
 mdName := deviceFields[0] // mdx
 state := deviceFields[2] // active or inactive
@@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
+return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
 }
 syncLineIdx := i + 2
@@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 // If device is syncing at the moment, get the number of currently
 // synced bytes, otherwise that number equals the size of the device.
-syncedBlocks := size
+blocksSynced := size
+blocksToBeSynced := size
 speed := float64(0)
 finish := float64(0)
 pct := float64(0)
@@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 // Handle case when resync=PENDING or resync=DELAYED.
 if strings.Contains(lines[syncLineIdx], "PENDING") ||
 strings.Contains(lines[syncLineIdx], "DELAYED") {
-syncedBlocks = 0
+blocksSynced = 0
 } else {
-syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
+blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
+return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
 }
 }
 }
@@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 DisksSpare: spare,
 DisksTotal: total,
 BlocksTotal: size,
-BlocksSynced: syncedBlocks,
+BlocksSynced: blocksSynced,
+BlocksToBeSynced: blocksToBeSynced,
 BlocksSyncedPct: pct,
 BlocksSyncedFinishTime: finish,
 BlocksSyncedSpeed: speed,
@@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
 statusFields := strings.Fields(statusLine)
 if len(statusFields) < 1 {
-return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
 }
 sizeStr := statusFields[0]
 size, err = strconv.ParseInt(sizeStr, 10, 64)
 if err != nil {
-return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
 }
 if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
 matches := statusLineRE.FindStringSubmatch(statusLine)
 if len(matches) != 5 {
-return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
+return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
 }
 total, err = strconv.ParseInt(matches[2], 10, 64)
 if err != nil {
-return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
 }
 active, err = strconv.ParseInt(matches[3], 10, 64)
 if err != nil {
-return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
+return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
 }
 down = int64(strings.Count(matches[4], "_"))
 return active, total, down, size, nil
 }
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
 matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
 if len(matches) != 2 {
-return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
+return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
 }
-syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+blocks := strings.Split(matches[1], "/")
+blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
 if err != nil {
-return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
+return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
+}
+blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+if err != nil {
+return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
 }
 // Get percentage complete
 matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
 if len(matches) != 2 {
-return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
+return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
 }
 pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
 if err != nil {
-return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
+return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
 }
 // Get time expected left to complete
 matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
 if len(matches) != 2 {
-return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
+return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
 }
 finish, err = strconv.ParseFloat(matches[1], 64)
 if err != nil {
-return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
+return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
 }
 // Get recovery speed
 matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
 if len(matches) != 2 {
-return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
+return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
 }
 speed, err = strconv.ParseFloat(matches[1], 64)
 if err != nil {
-return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
+return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
 }
-return syncedBlocks, pct, finish, speed, nil
+return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
 }
 func evalComponentDevices(deviceFields []string) []string {
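Note: the mdstat hunks above add a BlocksToBeSynced field to MDStat and make evalRecoveryLine return both sides of the (synced/total) pair. A minimal usage sketch of the new field follows; it is illustrative only and not part of this diff.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	mdstats, err := fs.MDStat()
	if err != nil {
		panic(err)
	}
	for _, md := range mdstats {
		// BlocksToBeSynced is new in this bump; per the parser above it is
		// initialized to the device size when no recovery line is present.
		fmt.Printf("%s: %d/%d blocks synced (%.1f%%)\n",
			md.Name, md.BlocksSynced, md.BlocksToBeSynced, md.BlocksSyncedPct)
	}
}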

@@ -126,6 +126,7 @@ type Meminfo struct {
 VmallocUsed *uint64
 // largest contiguous block of vmalloc area which is free
 VmallocChunk *uint64
+Percpu *uint64
 HardwareCorrupted *uint64
 AnonHugePages *uint64
 ShmemHugePages *uint64
@@ -140,6 +141,55 @@ type Meminfo struct {
 DirectMap4k *uint64
 DirectMap2M *uint64
 DirectMap1G *uint64
+// The struct fields below are the byte-normalized counterparts to the
+// existing struct fields. Values are normalized using the optional
+// unit field in the meminfo line.
+MemTotalBytes *uint64
+MemFreeBytes *uint64
+MemAvailableBytes *uint64
+BuffersBytes *uint64
+CachedBytes *uint64
+SwapCachedBytes *uint64
+ActiveBytes *uint64
+InactiveBytes *uint64
+ActiveAnonBytes *uint64
+InactiveAnonBytes *uint64
+ActiveFileBytes *uint64
+InactiveFileBytes *uint64
+UnevictableBytes *uint64
+MlockedBytes *uint64
+SwapTotalBytes *uint64
+SwapFreeBytes *uint64
+DirtyBytes *uint64
+WritebackBytes *uint64
+AnonPagesBytes *uint64
+MappedBytes *uint64
+ShmemBytes *uint64
+SlabBytes *uint64
+SReclaimableBytes *uint64
+SUnreclaimBytes *uint64
+KernelStackBytes *uint64
+PageTablesBytes *uint64
+NFSUnstableBytes *uint64
+BounceBytes *uint64
+WritebackTmpBytes *uint64
+CommitLimitBytes *uint64
+CommittedASBytes *uint64
+VmallocTotalBytes *uint64
+VmallocUsedBytes *uint64
+VmallocChunkBytes *uint64
+PercpuBytes *uint64
+HardwareCorruptedBytes *uint64
+AnonHugePagesBytes *uint64
+ShmemHugePagesBytes *uint64
+ShmemPmdMappedBytes *uint64
+CmaTotalBytes *uint64
+CmaFreeBytes *uint64
+HugepagesizeBytes *uint64
+DirectMap4kBytes *uint64
+DirectMap2MBytes *uint64
+DirectMap1GBytes *uint64
 }
 // Meminfo returns an information about current kernel/system memory statistics.
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
 m, err := parseMemInfo(bytes.NewReader(b))
 if err != nil {
-return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
+return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
 }
 return *m, nil
@@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
 var m Meminfo
 s := bufio.NewScanner(r)
 for s.Scan() {
-// Each line has at least a name and value; we ignore the unit.
 fields := strings.Fields(s.Text())
-if len(fields) < 2 {
-return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
-}
-v, err := strconv.ParseUint(fields[1], 0, 64)
+var val, valBytes uint64
+val, err := strconv.ParseUint(fields[1], 0, 64)
 if err != nil {
 return nil, err
 }
+switch len(fields) {
+case 2:
+// No unit present, use the parsed the value as bytes directly.
+valBytes = val
+case 3:
+// Unit present in optional 3rd field, convert it to
+// bytes. The only unit supported within the Linux
+// kernel is `kB`.
+if fields[2] != "kB" {
+return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+}
+valBytes = 1024 * val
+default:
+return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+}
 switch fields[0] {
 case "MemTotal:":
-m.MemTotal = &v
+m.MemTotal = &val
+m.MemTotalBytes = &valBytes
 case "MemFree:":
-m.MemFree = &v
+m.MemFree = &val
+m.MemFreeBytes = &valBytes
 case "MemAvailable:":
-m.MemAvailable = &v
+m.MemAvailable = &val
+m.MemAvailableBytes = &valBytes
 case "Buffers:":
-m.Buffers = &v
+m.Buffers = &val
+m.BuffersBytes = &valBytes
 case "Cached:":
-m.Cached = &v
+m.Cached = &val
+m.CachedBytes = &valBytes
 case "SwapCached:":
-m.SwapCached = &v
+m.SwapCached = &val
+m.SwapCachedBytes = &valBytes
 case "Active:":
-m.Active = &v
+m.Active = &val
+m.ActiveBytes = &valBytes
 case "Inactive:":
-m.Inactive = &v
+m.Inactive = &val
+m.InactiveBytes = &valBytes
 case "Active(anon):":
-m.ActiveAnon = &v
+m.ActiveAnon = &val
+m.ActiveAnonBytes = &valBytes
 case "Inactive(anon):":
-m.InactiveAnon = &v
+m.InactiveAnon = &val
+m.InactiveAnonBytes = &valBytes
 case "Active(file):":
-m.ActiveFile = &v
+m.ActiveFile = &val
+m.ActiveFileBytes = &valBytes
 case "Inactive(file):":
-m.InactiveFile = &v
+m.InactiveFile = &val
+m.InactiveFileBytes = &valBytes
 case "Unevictable:":
-m.Unevictable = &v
+m.Unevictable = &val
+m.UnevictableBytes = &valBytes
 case "Mlocked:":
-m.Mlocked = &v
+m.Mlocked = &val
+m.MlockedBytes = &valBytes
 case "SwapTotal:":
-m.SwapTotal = &v
+m.SwapTotal = &val
+m.SwapTotalBytes = &valBytes
 case "SwapFree:":
-m.SwapFree = &v
+m.SwapFree = &val
+m.SwapFreeBytes = &valBytes
 case "Dirty:":
-m.Dirty = &v
+m.Dirty = &val
+m.DirtyBytes = &valBytes
 case "Writeback:":
-m.Writeback = &v
+m.Writeback = &val
+m.WritebackBytes = &valBytes
 case "AnonPages:":
-m.AnonPages = &v
+m.AnonPages = &val
+m.AnonPagesBytes = &valBytes
 case "Mapped:":
-m.Mapped = &v
+m.Mapped = &val
+m.MappedBytes = &valBytes
 case "Shmem:":
-m.Shmem = &v
+m.Shmem = &val
+m.ShmemBytes = &valBytes
 case "Slab:":
-m.Slab = &v
+m.Slab = &val
+m.SlabBytes = &valBytes
 case "SReclaimable:":
-m.SReclaimable = &v
+m.SReclaimable = &val
+m.SReclaimableBytes = &valBytes
 case "SUnreclaim:":
-m.SUnreclaim = &v
+m.SUnreclaim = &val
+m.SUnreclaimBytes = &valBytes
 case "KernelStack:":
-m.KernelStack = &v
+m.KernelStack = &val
+m.KernelStackBytes = &valBytes
 case "PageTables:":
-m.PageTables = &v
+m.PageTables = &val
+m.PageTablesBytes = &valBytes
 case "NFS_Unstable:":
-m.NFSUnstable = &v
+m.NFSUnstable = &val
+m.NFSUnstableBytes = &valBytes
 case "Bounce:":
-m.Bounce = &v
+m.Bounce = &val
+m.BounceBytes = &valBytes
 case "WritebackTmp:":
-m.WritebackTmp = &v
+m.WritebackTmp = &val
+m.WritebackTmpBytes = &valBytes
 case "CommitLimit:":
-m.CommitLimit = &v
+m.CommitLimit = &val
+m.CommitLimitBytes = &valBytes
 case "Committed_AS:":
-m.CommittedAS = &v
+m.CommittedAS = &val
+m.CommittedASBytes = &valBytes
 case "VmallocTotal:":
-m.VmallocTotal = &v
+m.VmallocTotal = &val
+m.VmallocTotalBytes = &valBytes
 case "VmallocUsed:":
-m.VmallocUsed = &v
+m.VmallocUsed = &val
+m.VmallocUsedBytes = &valBytes
 case "VmallocChunk:":
-m.VmallocChunk = &v
+m.VmallocChunk = &val
+m.VmallocChunkBytes = &valBytes
+case "Percpu:":
+m.Percpu = &val
+m.PercpuBytes = &valBytes
 case "HardwareCorrupted:":
-m.HardwareCorrupted = &v
+m.HardwareCorrupted = &val
+m.HardwareCorruptedBytes = &valBytes
 case "AnonHugePages:":
-m.AnonHugePages = &v
+m.AnonHugePages = &val
+m.AnonHugePagesBytes = &valBytes
 case "ShmemHugePages:":
-m.ShmemHugePages = &v
+m.ShmemHugePages = &val
+m.ShmemHugePagesBytes = &valBytes
 case "ShmemPmdMapped:":
-m.ShmemPmdMapped = &v
+m.ShmemPmdMapped = &val
+m.ShmemPmdMappedBytes = &valBytes
 case "CmaTotal:":
-m.CmaTotal = &v
+m.CmaTotal = &val
+m.CmaTotalBytes = &valBytes
 case "CmaFree:":
-m.CmaFree = &v
+m.CmaFree = &val
+m.CmaFreeBytes = &valBytes
 case "HugePages_Total:":
-m.HugePagesTotal = &v
+m.HugePagesTotal = &val
 case "HugePages_Free:":
-m.HugePagesFree = &v
+m.HugePagesFree = &val
 case "HugePages_Rsvd:":
-m.HugePagesRsvd = &v
+m.HugePagesRsvd = &val
 case "HugePages_Surp:":
-m.HugePagesSurp = &v
+m.HugePagesSurp = &val
 case "Hugepagesize:":
-m.Hugepagesize = &v
+m.Hugepagesize = &val
+m.HugepagesizeBytes = &valBytes
 case "DirectMap4k:":
-m.DirectMap4k = &v
+m.DirectMap4k = &val
+m.DirectMap4kBytes = &valBytes
 case "DirectMap2M:":
-m.DirectMap2M = &v
+m.DirectMap2M = &val
+m.DirectMap2MBytes = &valBytes
 case "DirectMap1G:":
-m.DirectMap1G = &v
+m.DirectMap1G = &val
+m.DirectMap1GBytes = &valBytes
 }
 }
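Note: the meminfo hunks above add byte-normalized *Bytes counterparts (and a new Percpu field) derived from the optional kB unit column. A short, illustrative sketch of reading the new fields; the nil checks matter because every Meminfo field is a pointer.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}
	// MemTotal keeps the raw /proc/meminfo value (usually in kB);
	// MemTotalBytes is the new unit-normalized counterpart.
	if mi.MemTotal != nil && mi.MemTotalBytes != nil {
		fmt.Printf("MemTotal: %d (raw) = %d bytes\n", *mi.MemTotal, *mi.MemTotalBytes)
	}
	if mi.PercpuBytes != nil {
		fmt.Printf("Percpu: %d bytes\n", *mi.PercpuBytes)
	}
}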

@@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
 if mountInfo[6] != "" {
 mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
 if err != nil {
-return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
+return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
 }
 }
 return mount, nil

@@ -88,7 +88,7 @@ type MountStatsNFS struct {
 // Statistics broken down by filesystem operation.
 Operations []NFSOperationStats
 // Statistics about the NFS RPC transport.
-Transport NFSTransportStats
+Transport []NFSTransportStats
 }
 // mountStats implements MountStats.
@@ -194,8 +194,6 @@ type NFSOperationStats struct {
 CumulativeTotalResponseMilliseconds uint64
 // Duration from when a request was enqueued to when it was completely handled.
 CumulativeTotalRequestMilliseconds uint64
-// The average time from the point the client sends RPC requests until it receives the response.
-AverageRTTMilliseconds float64
 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
 Errors uint64
 }
@@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
 return nil, err
 }
-stats.Transport = *tstats
+stats.Transport = append(stats.Transport, *tstats)
 }
 // When encountering "per-operation statistics", we must break this
@@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 CumulativeTotalResponseMilliseconds: ns[6],
 CumulativeTotalRequestMilliseconds: ns[7],
 }
-if ns[0] != 0 {
-opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
-}
 if len(ns) > 8 {
 opStats.Errors = ns[8]
@@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
 }
 default:
-return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
+return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
 }
 // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
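Note: this is a breaking API change for callers: MountStatsNFS.Transport goes from a single NFSTransportStats value to a slice (one entry per xprt line), and the derived AverageRTTMilliseconds field is removed. A hedged adaptation sketch, assuming the existing Proc.MountStats API and its Mount/Stats fields:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	p, err := fs.Self()
	if err != nil {
		panic(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		// Transport is now a slice; iterate instead of reading one struct.
		for _, t := range nfs.Transport {
			fmt.Printf("%s: %d RPC sends\n", m.Device, t.Sends)
		}
	}
}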

@@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 stat, err := parseConntrackStat(bytes.NewReader(b))
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
+return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
 }
 return stat, nil
@@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
 entries, err := util.ParseHexUint64s(fields)
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
+return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
 }
 numEntries := len(entries)
 if numEntries < 16 || numEntries > 17 {

@@ -50,10 +50,13 @@ type (
 // UsedSockets shows the total number of parsed lines representing the
 // number of used sockets.
 UsedSockets uint64
+// Drops shows the total number of dropped packets of all UPD sockets.
+Drops *uint64
 }
 // netIPSocketLine represents the fields parsed from a single line
 // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
 // For the proc file format details, see https://linux.die.net/man/5/proc.
 netIPSocketLine struct {
 Sl uint64
@@ -66,6 +69,7 @@ type (
 RxQueue uint64
 UID uint64
 Inode uint64
+Drops *uint64
 }
 )
@@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
 defer f.Close()
 var netIPSocket NetIPSocket
+isUDP := strings.Contains(file, "udp")
 lr := io.LimitReader(f, readLimit)
 s := bufio.NewScanner(lr)
 s.Scan() // skip first line with headers
 for s.Scan() {
 fields := strings.Fields(s.Text())
-line, err := parseNetIPSocketLine(fields)
+line, err := parseNetIPSocketLine(fields, isUDP)
 if err != nil {
 return nil, err
 }
@@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
 defer f.Close()
 var netIPSocketSummary NetIPSocketSummary
+var udpPacketDrops uint64
+isUDP := strings.Contains(file, "udp")
 lr := io.LimitReader(f, readLimit)
 s := bufio.NewScanner(lr)
 s.Scan() // skip first line with headers
 for s.Scan() {
 fields := strings.Fields(s.Text())
-line, err := parseNetIPSocketLine(fields)
+line, err := parseNetIPSocketLine(fields, isUDP)
 if err != nil {
 return nil, err
 }
 netIPSocketSummary.TxQueueLength += line.TxQueue
 netIPSocketSummary.RxQueueLength += line.RxQueue
 netIPSocketSummary.UsedSockets++
+if isUDP {
+udpPacketDrops += *line.Drops
+netIPSocketSummary.Drops = &udpPacketDrops
+}
 }
 if err := s.Err(); err != nil {
 return nil, err
@@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
 var byteIP []byte
 byteIP, err := hex.DecodeString(hexIP)
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
+return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
 }
 switch len(byteIP) {
 case 4:
@@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) {
 }
 return i, nil
 default:
-return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
+return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
 }
 }
 // parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
 line := &netIPSocketLine{}
 if len(fields) < 10 {
 return nil, fmt.Errorf(
@@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
 }
 if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
-return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
+return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
 }
 // local_address
 l := strings.Split(fields[1], ":")
@@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
 return nil, err
 }
 if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
-return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
+return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
 }
 // remote_address
@@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
 return nil, err
 }
 if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
-return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
+return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
 }
 // st
 if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
-return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
+return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
 }
 // tx_queue and rx_queue
@@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
 )
 }
 if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
-return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
+return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
 }
 if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
-return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
+return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
 }
 // uid
 if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
-return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
+return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
 }
 // inode
 if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
-return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+}
+// drops
+if isUDP {
+drops, err := strconv.ParseUint(fields[12], 0, 64)
+if err != nil {
+return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+}
+line.Drops = &drops
 }
 return line, nil
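Note: the hunks above add per-line and summary UDP drop counters. A hedged sketch of reading the new summary value; it assumes the existing NetUDPSummary helper in net_udp.go (not shown in this diff) still wraps NetIPSocketSummary:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	summary, err := fs.NetUDPSummary()
	if err != nil {
		panic(err)
	}
	// Drops is a pointer and is only populated for udp/udp6 files.
	if summary.Drops != nil {
		fmt.Printf("UDP packet drops: %d\n", *summary.Drops)
	}
}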

@@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
 stat, err := parseSockstat(bytes.NewReader(b))
 if err != nil {
-return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
+return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
 }
 return stat, nil
@@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
 // The remaining fields are key/value pairs.
 kvs, err := parseSockstatKVs(fields[1:])
 if err != nil {
-return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
+return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
 }
 // The first field is the protocol. We must trim its colon suffix.

@@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
 entries, err := parseSoftnet(bytes.NewReader(b))
 if err != nil {
-return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
+return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
 }
 return entries, nil

vendor/github.com/prometheus/procfs/net_tls_stat.go (new vendored file, 119 lines)

@@ -0,0 +1,119 @@
// Copyright 2023 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// TLSStat struct represents data in /proc/net/tls_stat.
// See https://docs.kernel.org/networking/tls.html#statistics
type TLSStat struct {
// number of TX sessions currently installed where host handles cryptography
TLSCurrTxSw int
// number of RX sessions currently installed where host handles cryptography
TLSCurrRxSw int
// number of TX sessions currently installed where NIC handles cryptography
TLSCurrTxDevice int
// number of RX sessions currently installed where NIC handles cryptography
TLSCurrRxDevice int
//number of TX sessions opened with host cryptography
TLSTxSw int
//number of RX sessions opened with host cryptography
TLSRxSw int
// number of TX sessions opened with NIC cryptography
TLSTxDevice int
// number of RX sessions opened with NIC cryptography
TLSRxDevice int
// record decryption failed (e.g. due to incorrect authentication tag)
TLSDecryptError int
// number of RX resyncs sent to NICs handling cryptography
TLSRxDeviceResync int
// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
TLSDecryptRetry int
// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
TLSRxNoPadViolation int
}
// NewTLSStat reads the tls_stat statistics.
func NewTLSStat() (TLSStat, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return TLSStat{}, err
}
return fs.NewTLSStat()
}
// NewTLSStat reads the tls_stat statistics.
func (fs FS) NewTLSStat() (TLSStat, error) {
file, err := os.Open(fs.proc.Path("net/tls_stat"))
if err != nil {
return TLSStat{}, err
}
defer file.Close()
var (
tlsstat = TLSStat{}
s = bufio.NewScanner(file)
)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
}
name := fields[0]
value, err := strconv.Atoi(fields[1])
if err != nil {
return TLSStat{}, err
}
switch name {
case "TlsCurrTxSw":
tlsstat.TLSCurrTxSw = value
case "TlsCurrRxSw":
tlsstat.TLSCurrRxSw = value
case "TlsCurrTxDevice":
tlsstat.TLSCurrTxDevice = value
case "TlsCurrRxDevice":
tlsstat.TLSCurrRxDevice = value
case "TlsTxSw":
tlsstat.TLSTxSw = value
case "TlsRxSw":
tlsstat.TLSRxSw = value
case "TlsTxDevice":
tlsstat.TLSTxDevice = value
case "TlsRxDevice":
tlsstat.TLSRxDevice = value
case "TlsDecryptError":
tlsstat.TLSDecryptError = value
case "TlsRxDeviceResync":
tlsstat.TLSRxDeviceResync = value
case "TlsDecryptRetry":
tlsstat.TLSDecryptRetry = value
case "TlsRxNoPadViolation":
tlsstat.TLSRxNoPadViolation = value
}
}
return tlsstat, s.Err()
}
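Note: net_tls_stat.go is entirely new in this bump. A minimal, illustrative usage sketch of the added API:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	// NewTLSStat parses /proc/net/tls_stat, as implemented in the new file above.
	stats, err := fs.NewTLSStat()
	if err != nil {
		panic(err)
	}
	fmt.Printf("kTLS RX sessions (software): %d, decrypt errors: %d\n",
		stats.TLSCurrRxSw, stats.TLSDecryptError)
}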

@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
 line := s.Text()
 item, err := nu.parseLine(line, hasInode, minFields)
 if err != nil {
-return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
+return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
 }
 nu.Rows = append(nu.Rows, item)
 }
 if err := s.Err(); err != nil {
-return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
+return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
 }
 return &nu, nil
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
 users, err := u.parseUsers(fields[1])
 if err != nil {
-return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
+return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
 }
 flags, err := u.parseFlags(fields[3])
 if err != nil {
-return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
+return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
 }
 typ, err := u.parseType(fields[4])
 if err != nil {
-return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
+return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
 }
 state, err := u.parseState(fields[5])
 if err != nil {
-return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
+return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
 }
 var inode uint64
 if hasInode {
 inode, err = u.parseInode(fields[6])
 if err != nil {
-return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
+return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
 }
 }

@@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
 m, err := parseWireless(bytes.NewReader(b))
 if err != nil {
-return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
+return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
 }
 return m, nil
@@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
 qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
 if err != nil {
-return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
 }
 qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
 if err != nil {
-return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
 }
 qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
 if err != nil {
-return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
 }
 dnwid, err := strconv.Atoi(stats[4])
 if err != nil {
-return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
 }
 dcrypt, err := strconv.Atoi(stats[5])
 if err != nil {
-return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
 }
 dfrag, err := strconv.Atoi(stats[6])
 if err != nil {
-return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
 }
 dretry, err := strconv.Atoi(stats[7])
 if err != nil {
-return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
 }
 dmisc, err := strconv.Atoi(stats[8])
 if err != nil {
-return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
 }
 mbeacon, err := strconv.Atoi(stats[9])
 if err != nil {
-return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
 }
 w := &Wireless{
@@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
 }
 if err := scanner.Err(); err != nil {
-return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
 }
 return interfaces, nil

@@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
 names, err := d.Readdirnames(-1)
 if err != nil {
-return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
+return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
 }
 p := Procs{}
@@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
 return []string{}, nil
 }
-return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
 }
 // Wchan returns the wchan (wait channel) of a process.
@@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
 for i, n := range names {
 fd, err := strconv.ParseInt(n, 10, 32)
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
+return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
 }
 fds[i] = uintptr(fd)
 }
@@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
 names, err := d.Readdirnames(-1)
 if err != nil {
-return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
+return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
 }
 return names, nil
View File
@@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
  }
  i, err := strconv.ParseUint(s, 10, 64)
  if err != nil {
- return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
+ return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
  }
  return i, nil
  }
View File
@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
  names, err := d.Readdirnames(-1)
  if err != nil {
- return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
+ return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
  }
  ns := make(Namespaces, len(names))
@@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
  typ := fields[0]
  inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
  if err != nil {
- return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
+ return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
  }
  ns[name] = Namespace{typ, uint32(inode)}
View File
@@ -61,7 +61,7 @@ type PSIStats struct {
  func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
  data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
  if err != nil {
- return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
+ return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
  }
  return parsePSIStats(bytes.NewReader(data))
View File
@@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
  }
  v := strings.TrimSpace(kv[1])
- v = strings.TrimRight(v, " kB")
+ v = strings.TrimSuffix(v, " kB")
  vKBytes, err := strconv.ParseUint(v, 10, 64)
  if err != nil {
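The smaps change above swaps strings.TrimRight for strings.TrimSuffix. The two are not interchangeable: TrimRight treats " kB" as a set of characters to strip from the end, while TrimSuffix removes the literal suffix exactly once. A small illustrative snippet (standard library only, not procfs code; the "0k" input is contrived to show the difference):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// For typical smaps values like "128 kB" both calls produce "128",
	// but TrimRight keeps stripping any trailing ' ', 'k' or 'B'.
	fmt.Println(strings.TrimRight("128 kB", " kB"))  // "128"
	fmt.Println(strings.TrimSuffix("128 kB", " kB")) // "128"
	fmt.Println(strings.TrimRight("0k kB", " kB"))   // "0"  (set-based trimming over-strips)
	fmt.Println(strings.TrimSuffix("0k kB", " kB"))  // "0k" (only the exact suffix is removed)
}
```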
View File
@@ -110,6 +110,11 @@ type ProcStat struct {
  Policy uint
  // Aggregated block I/O delays, measured in clock ticks (centiseconds).
  DelayAcctBlkIOTicks uint64
+ // Guest time of the process (time spent running a virtual CPU for a guest
+ // operating system), measured in clock ticks.
+ GuestTime int
+ // Guest time of the process's children, measured in clock ticks.
+ CGuestTime int
  proc FS
  }
@@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
  &s.RTPriority,
  &s.Policy,
  &s.DelayAcctBlkIOTicks,
+ &s.GuestTime,
+ &s.CGuestTime,
  )
  if err != nil {
  return ProcStat{}, err
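The two new ProcStat fields expose guest_time and cguest_time from /proc/<pid>/stat. A hypothetical consumer-side sketch (Linux only; procfs.Self and Proc.Stat are existing procfs APIs, the printing is purely illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	st, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Absent in procfs v0.12.0, present in the version vendored here:
	// guest time of the process and of its children, in clock ticks.
	fmt.Println("guest time (ticks):", st.GuestTime)
	fmt.Println("children guest time (ticks):", st.CGuestTime)
}
```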
View File
@@ -15,6 +15,7 @@ package procfs
  import (
  "bytes"
+ "math/bits"
  "sort"
  "strconv"
  "strings"
@@ -76,9 +77,9 @@ type ProcStatus struct {
  NonVoluntaryCtxtSwitches uint64
  // UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]string
+ UIDs [4]uint64
  // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]string
+ GIDs [4]uint64
  // CpusAllowedList: List of cpu cores processes are allowed to run on.
  CpusAllowedList []uint64
@@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) {
  // convert kB to B
  vBytes := vKBytes * 1024
- s.fillStatus(k, v, vKBytes, vBytes)
+ err = s.fillStatus(k, v, vKBytes, vBytes)
+ if err != nil {
+ return ProcStatus{}, err
+ }
  }
  return s, nil
  }
- func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
  switch k {
  case "Tgid":
  s.TGID = int(vUint)
  case "Name":
  s.Name = vString
  case "Uid":
- copy(s.UIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
  case "Gid":
- copy(s.GIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
  case "NSpid":
  s.NSpids = calcNSPidsList(vString)
  case "VmPeak":
@@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
  s.CpusAllowedList = calcCpusAllowedList(vString)
  }
+ return nil
  }
  // TotalCtxtSwitches returns the total context switch.
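The switch of ProcStatus.UIDs and ProcStatus.GIDs from [4]string to [4]uint64 is visible to consumers of the library. A hypothetical caller-side sketch (Linux only; procfs.Self and Proc.NewStatus exist in the library, the root check is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	st, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	// With the version vendored here, UIDs/GIDs are numeric, so a caller
	// that previously compared strings (e.g. st.UIDs[0] == "0") now
	// compares integers directly.
	fmt.Println("real uid:", st.UIDs[0], "effective uid:", st.UIDs[1])
	fmt.Println("running as root:", st.UIDs[0] == 0)
}
```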
View File
@@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
  vp := util.NewValueParser(f)
  values[i] = vp.Int()
  if err := vp.Err(); err != nil {
- return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+ return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
  }
  }
  return values, nil
View File
@@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.Hi = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "TIMER:":
@@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.Timer = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "NET_TX:":
@@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.NetTx = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "NET_RX:":
@@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.NetRx = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "BLOCK:":
@@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.Block = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "IRQ_POLL:":
@@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.IRQPoll = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "TASKLET:":
@@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.Tasklet = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "SCHED:":
@@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.Sched = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "HRTIMER:":
@@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.HRTimer = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "RCU:":
@@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
  softirqs.RCU = make([]uint64, len(perCPU))
  for i, count := range perCPU {
  if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
  }
  }
  }
  }
  if err := scanner.Err(); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
  }
  return softirqs, scanner.Err()
View File
@@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
  &cpuStat.Guest, &cpuStat.GuestNice)
  if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
  }
  if count == 0 {
  return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
  cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
  if err != nil {
- return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
  }
  return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
  &softIRQStat.Hrtimer, &softIRQStat.Rcu)
  if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
+ return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
  }
  return softIRQStat, total, nil
@@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
  switch {
  case parts[0] == "btime":
  if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
  }
  case parts[0] == "intr":
  if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
  }
  numberedIRQs := parts[2:]
  stat.IRQ = make([]uint64, len(numberedIRQs))
  for i, count := range numberedIRQs {
  if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
  }
  }
  case parts[0] == "ctxt":
  if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
  }
  case parts[0] == "processes":
  if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
  }
  case parts[0] == "procs_running":
  if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
  }
  case parts[0] == "procs_blocked":
  if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
  }
  case parts[0] == "softirq":
  softIRQStats, total, err := parseSoftIRQStat(line)
@@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
  }
  if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
  }
  return stat, nil
View File
@@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
  swap.Size, err = strconv.Atoi(swapFields[2])
  if err != nil {
- return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
+ return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
  }
  swap.Used, err = strconv.Atoi(swapFields[3])
  if err != nil {
- return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
+ return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
  }
  swap.Priority, err = strconv.Atoi(swapFields[4])
  if err != nil {
- return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
+ return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
  }
  return swap, nil
View File
@@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
  names, err := d.Readdirnames(-1)
  if err != nil {
- return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
+ return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
  }
  t := Procs{}
View File
@@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
  func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
  data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
  if err != nil {
- return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
  }
  zoneinfo, err := parseZoneinfo(data)
  if err != nil {
- return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
  }
  return zoneinfo, nil
  }
vendor/modules.txt
View File
@@ -246,8 +246,8 @@ github.com/prometheus/client_model/go
  github.com/prometheus/common/expfmt
  github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
  github.com/prometheus/common/model
- # github.com/prometheus/procfs v0.12.0
+ # github.com/prometheus/procfs v0.15.1
- ## explicit; go 1.19
+ ## explicit; go 1.20
  github.com/prometheus/procfs
  github.com/prometheus/procfs/internal/fs
  github.com/prometheus/procfs/internal/util