vendor: github.com/prometheus/procfs v0.15.1

full diff: https://github.com/prometheus/procfs/compare/v0.12.0...v0.15.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn, 2024-07-16 18:46:30 +02:00
parent 0f712827f1
commit caa5d15e98
GPG Key ID: 76698F39D527CE8C (no known key found for this signature in the database)
37 changed files with 496 additions and 200 deletions

View File

@ -81,7 +81,7 @@ require (
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect

View File

@ -236,8 +236,8 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=

View File

@ -1,9 +1,16 @@
---
linters:
enable:
- errcheck
- godot
- gosimple
- govet
- ineffassign
- misspell
- revive
- staticcheck
- testifylint
- unused
linter-settings:
godot:

View File

@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
* Paul Gier <pgier@redhat.com> @pgier
* Paul Gier <paulgier@gmail.com> @pgier
* Ben Kochie <superq@gmail.com> @SuperQ

View File

@ -49,23 +49,23 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell command -v gotestsum > /dev/null),)
ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
PROMU_VERSION ?= 0.15.0
PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.54.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
GOLANGCI_LINT_VERSION ?= v1.59.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
ifneq (,$(SKIP_GOLANGCI_LINT))
@ -169,16 +169,20 @@ common-vet:
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-lint-fix
common-lint-fix: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint fix"
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell command -v yamllint > /dev/null))
ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@ -204,6 +208,10 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
.PHONY: common-docker-repo-name
common-docker-repo-name:
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:

View File

@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}
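
Most hunks in this bump change the leading error verb from %s to %w so the package's sentinel errors (ErrFileRead, ErrFileParse) survive wrapping; fmt.Errorf with two %w verbs needs Go 1.20, which matches the go 1.19 to go 1.20 change in vendor/modules.txt at the end of this diff. A minimal sketch (not part of this diff) of what the change enables for a caller; the mount point and logging are illustrative only:

package main

import (
    "errors"
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint) // normally "/proc"
    if err != nil {
        log.Fatal(err)
    }
    entries, err := fs.GatherARPEntries()
    if errors.Is(err, procfs.ErrFileRead) {
        // With %w wrapping the sentinel is matchable; with the old %s verb it was not.
        log.Printf("arp table unreadable: %v", err)
        return
    }
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("ARP entries:", len(entries))
}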

View File

@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
node := strings.TrimRight(parts[1], ",")
zone := strings.TrimRight(parts[3], ",")
node := strings.TrimSuffix(parts[1], ",")
zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}
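
The TrimRight to TrimSuffix change here (and in proc_smaps.go later in this diff) is behavioral, not cosmetic: TrimRight strips every trailing rune contained in the cutset, while TrimSuffix removes at most one exact suffix. A standalone illustration, not part of this diff:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // TrimRight treats "," as a set of runes and keeps stripping.
    fmt.Println(strings.TrimRight("Normal,,", ","))  // "Normal"
    // TrimSuffix removes exactly one trailing ",".
    fmt.Println(strings.TrimSuffix("Normal,,", ",")) // "Normal,"
    // With a multi-rune cutset the difference matters more:
    fmt.Println(strings.TrimRight("16 kB", " kB"))   // "16" (strips any of ' ', 'k', 'B')
    fmt.Println(strings.TrimSuffix("16 kB", " kB"))  // "16" (removes the whole " kB" once)
}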

View File

@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
}
field := strings.SplitN(firstLine, ": ", 2)
@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}

View File

@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil
@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])

View File

@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
}
return *m, nil
@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {

View File

@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0,
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {

View File

@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{

View File

@ -23,7 +23,7 @@ import (
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
@ -50,6 +50,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// Number of blocks on the device that need to be synced.
BlocksToBeSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
blocksSynced := size
blocksToBeSynced := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
blocksSynced = 0
} else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
BlocksSynced: blocksSynced,
BlocksToBeSynced: blocksToBeSynced,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))
return active, total, down, size, nil
}
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
blocks := strings.Split(matches[1], "/")
blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
}
blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
if err != nil {
return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
}
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil
return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {

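The mdstat changes keep the full "(synced/total)" pair from the recovery line, exposing the second number as the new BlocksToBeSynced field and widening evalRecoveryLine accordingly. A hedged sketch of a caller using the new field, reusing the fs handle and imports from the first sketch above; the Name and ActivityState fields are assumed from the package's MDStat struct, not shown in this hunk:

mdstats, err := fs.MDStat()
if err != nil {
    log.Fatal(err)
}
for _, md := range mdstats {
    // Per the parsing above, BlocksToBeSynced defaults to the device size
    // when no resync/recovery line is present.
    fmt.Printf("%s (%s): %d/%d blocks synced (%.1f%%)\n",
        md.Name, md.ActivityState, md.BlocksSynced, md.BlocksToBeSynced, md.BlocksSyncedPct)
}
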
View File

@ -126,6 +126,7 @@ type Meminfo struct {
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
@ -140,6 +141,55 @@ type Meminfo struct {
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
// The struct fields below are the byte-normalized counterparts to the
// existing struct fields. Values are normalized using the optional
// unit field in the meminfo line.
MemTotalBytes *uint64
MemFreeBytes *uint64
MemAvailableBytes *uint64
BuffersBytes *uint64
CachedBytes *uint64
SwapCachedBytes *uint64
ActiveBytes *uint64
InactiveBytes *uint64
ActiveAnonBytes *uint64
InactiveAnonBytes *uint64
ActiveFileBytes *uint64
InactiveFileBytes *uint64
UnevictableBytes *uint64
MlockedBytes *uint64
SwapTotalBytes *uint64
SwapFreeBytes *uint64
DirtyBytes *uint64
WritebackBytes *uint64
AnonPagesBytes *uint64
MappedBytes *uint64
ShmemBytes *uint64
SlabBytes *uint64
SReclaimableBytes *uint64
SUnreclaimBytes *uint64
KernelStackBytes *uint64
PageTablesBytes *uint64
NFSUnstableBytes *uint64
BounceBytes *uint64
WritebackTmpBytes *uint64
CommitLimitBytes *uint64
CommittedASBytes *uint64
VmallocTotalBytes *uint64
VmallocUsedBytes *uint64
VmallocChunkBytes *uint64
PercpuBytes *uint64
HardwareCorruptedBytes *uint64
AnonHugePagesBytes *uint64
ShmemHugePagesBytes *uint64
ShmemPmdMappedBytes *uint64
CmaTotalBytes *uint64
CmaFreeBytes *uint64
HugepagesizeBytes *uint64
DirectMap4kBytes *uint64
DirectMap2MBytes *uint64
DirectMap1GBytes *uint64
}
// Meminfo returns an information about current kernel/system memory statistics.
@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
}
return *m, nil
@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
// Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
}
var val, valBytes uint64
v, err := strconv.ParseUint(fields[1], 0, 64)
val, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
switch len(fields) {
case 2:
// No unit present, use the parsed the value as bytes directly.
valBytes = val
case 3:
// Unit present in optional 3rd field, convert it to
// bytes. The only unit supported within the Linux
// kernel is `kB`.
if fields[2] != "kB" {
return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
}
valBytes = 1024 * val
default:
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
}
switch fields[0] {
case "MemTotal:":
m.MemTotal = &v
m.MemTotal = &val
m.MemTotalBytes = &valBytes
case "MemFree:":
m.MemFree = &v
m.MemFree = &val
m.MemFreeBytes = &valBytes
case "MemAvailable:":
m.MemAvailable = &v
m.MemAvailable = &val
m.MemAvailableBytes = &valBytes
case "Buffers:":
m.Buffers = &v
m.Buffers = &val
m.BuffersBytes = &valBytes
case "Cached:":
m.Cached = &v
m.Cached = &val
m.CachedBytes = &valBytes
case "SwapCached:":
m.SwapCached = &v
m.SwapCached = &val
m.SwapCachedBytes = &valBytes
case "Active:":
m.Active = &v
m.Active = &val
m.ActiveBytes = &valBytes
case "Inactive:":
m.Inactive = &v
m.Inactive = &val
m.InactiveBytes = &valBytes
case "Active(anon):":
m.ActiveAnon = &v
m.ActiveAnon = &val
m.ActiveAnonBytes = &valBytes
case "Inactive(anon):":
m.InactiveAnon = &v
m.InactiveAnon = &val
m.InactiveAnonBytes = &valBytes
case "Active(file):":
m.ActiveFile = &v
m.ActiveFile = &val
m.ActiveFileBytes = &valBytes
case "Inactive(file):":
m.InactiveFile = &v
m.InactiveFile = &val
m.InactiveFileBytes = &valBytes
case "Unevictable:":
m.Unevictable = &v
m.Unevictable = &val
m.UnevictableBytes = &valBytes
case "Mlocked:":
m.Mlocked = &v
m.Mlocked = &val
m.MlockedBytes = &valBytes
case "SwapTotal:":
m.SwapTotal = &v
m.SwapTotal = &val
m.SwapTotalBytes = &valBytes
case "SwapFree:":
m.SwapFree = &v
m.SwapFree = &val
m.SwapFreeBytes = &valBytes
case "Dirty:":
m.Dirty = &v
m.Dirty = &val
m.DirtyBytes = &valBytes
case "Writeback:":
m.Writeback = &v
m.Writeback = &val
m.WritebackBytes = &valBytes
case "AnonPages:":
m.AnonPages = &v
m.AnonPages = &val
m.AnonPagesBytes = &valBytes
case "Mapped:":
m.Mapped = &v
m.Mapped = &val
m.MappedBytes = &valBytes
case "Shmem:":
m.Shmem = &v
m.Shmem = &val
m.ShmemBytes = &valBytes
case "Slab:":
m.Slab = &v
m.Slab = &val
m.SlabBytes = &valBytes
case "SReclaimable:":
m.SReclaimable = &v
m.SReclaimable = &val
m.SReclaimableBytes = &valBytes
case "SUnreclaim:":
m.SUnreclaim = &v
m.SUnreclaim = &val
m.SUnreclaimBytes = &valBytes
case "KernelStack:":
m.KernelStack = &v
m.KernelStack = &val
m.KernelStackBytes = &valBytes
case "PageTables:":
m.PageTables = &v
m.PageTables = &val
m.PageTablesBytes = &valBytes
case "NFS_Unstable:":
m.NFSUnstable = &v
m.NFSUnstable = &val
m.NFSUnstableBytes = &valBytes
case "Bounce:":
m.Bounce = &v
m.Bounce = &val
m.BounceBytes = &valBytes
case "WritebackTmp:":
m.WritebackTmp = &v
m.WritebackTmp = &val
m.WritebackTmpBytes = &valBytes
case "CommitLimit:":
m.CommitLimit = &v
m.CommitLimit = &val
m.CommitLimitBytes = &valBytes
case "Committed_AS:":
m.CommittedAS = &v
m.CommittedAS = &val
m.CommittedASBytes = &valBytes
case "VmallocTotal:":
m.VmallocTotal = &v
m.VmallocTotal = &val
m.VmallocTotalBytes = &valBytes
case "VmallocUsed:":
m.VmallocUsed = &v
m.VmallocUsed = &val
m.VmallocUsedBytes = &valBytes
case "VmallocChunk:":
m.VmallocChunk = &v
m.VmallocChunk = &val
m.VmallocChunkBytes = &valBytes
case "Percpu:":
m.Percpu = &val
m.PercpuBytes = &valBytes
case "HardwareCorrupted:":
m.HardwareCorrupted = &v
m.HardwareCorrupted = &val
m.HardwareCorruptedBytes = &valBytes
case "AnonHugePages:":
m.AnonHugePages = &v
m.AnonHugePages = &val
m.AnonHugePagesBytes = &valBytes
case "ShmemHugePages:":
m.ShmemHugePages = &v
m.ShmemHugePages = &val
m.ShmemHugePagesBytes = &valBytes
case "ShmemPmdMapped:":
m.ShmemPmdMapped = &v
m.ShmemPmdMapped = &val
m.ShmemPmdMappedBytes = &valBytes
case "CmaTotal:":
m.CmaTotal = &v
m.CmaTotal = &val
m.CmaTotalBytes = &valBytes
case "CmaFree:":
m.CmaFree = &v
m.CmaFree = &val
m.CmaFreeBytes = &valBytes
case "HugePages_Total:":
m.HugePagesTotal = &v
m.HugePagesTotal = &val
case "HugePages_Free:":
m.HugePagesFree = &v
m.HugePagesFree = &val
case "HugePages_Rsvd:":
m.HugePagesRsvd = &v
m.HugePagesRsvd = &val
case "HugePages_Surp:":
m.HugePagesSurp = &v
m.HugePagesSurp = &val
case "Hugepagesize:":
m.Hugepagesize = &v
m.Hugepagesize = &val
m.HugepagesizeBytes = &valBytes
case "DirectMap4k:":
m.DirectMap4k = &v
m.DirectMap4k = &val
m.DirectMap4kBytes = &valBytes
case "DirectMap2M:":
m.DirectMap2M = &v
m.DirectMap2M = &val
m.DirectMap2MBytes = &valBytes
case "DirectMap1G:":
m.DirectMap1G = &v
m.DirectMap1G = &val
m.DirectMap1GBytes = &valBytes
}
}
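
Meminfo now doubles every field with a *Bytes counterpart normalized via the optional kB unit (plus the new Percpu/PercpuBytes fields), so callers no longer multiply by 1024 themselves. A short sketch, reusing fs from the first example; all fields are pointers, hence the nil checks:

mi, err := fs.Meminfo()
if err != nil {
    log.Fatal(err)
}
if mi.MemAvailableBytes != nil {
    fmt.Printf("MemAvailable: %d bytes\n", *mi.MemAvailableBytes)
} else if mi.MemAvailable != nil {
    // Pre-normalization field: value as reported by /proc/meminfo (usually kB).
    fmt.Printf("MemAvailable: %d kB\n", *mi.MemAvailable)
}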

View File

@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
}
}
return mount, nil

View File

@ -88,7 +88,7 @@ type MountStatsNFS struct {
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
Transport NFSTransportStats
Transport []NFSTransportStats
}
// mountStats implements MountStats.
@ -194,8 +194,6 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
// The average time from the point the client sends RPC requests until it receives the response.
AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
return nil, err
}
stats.Transport = *tstats
stats.Transport = append(stats.Transport, *tstats)
}
// When encountering "per-operation statistics", we must break this
@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
}
if ns[0] != 0 {
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
}
if len(ns) > 8 {
opStats.Errors = ns[8]
@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay

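Two breaking changes for NFS mount-stats consumers: MountStatsNFS.Transport becomes a slice (one entry per xprt line), and the derived AverageRTTMilliseconds field is removed, so a caller that wants it must divide the cumulative response time by the request count itself. A hedged sketch, reusing fs from the first example; the Proc.MountStats call and the Mount/Stats field names are assumptions taken from the package's existing mountstats API, not shown in this hunk:

self, err := fs.Self()
if err != nil {
    log.Fatal(err)
}
mounts, err := self.MountStats()
if err != nil {
    log.Fatal(err)
}
for _, m := range mounts {
    nfs, ok := m.Stats.(*procfs.MountStatsNFS)
    if !ok {
        continue // not an NFS mount
    }
    for _, t := range nfs.Transport { // a single struct in v0.12.0, now a slice
        fmt.Printf("%s: %d RPC sends\n", m.Mount, t.Sends)
    }
    for _, op := range nfs.Operations {
        if op.Requests > 0 {
            // Replacement for the removed AverageRTTMilliseconds field.
            fmt.Printf("%s %s: avg RTT %.2f ms\n", m.Mount, op.Operation,
                float64(op.CumulativeTotalResponseMilliseconds)/float64(op.Requests))
        }
    }
}
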
View File

@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
}
return stat, nil
@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
entries, err := util.ParseHexUint64s(fields)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
numEntries := len(entries)
if numEntries < 16 || numEntries > 17 {

View File

@ -50,10 +50,13 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
// Drops shows the total number of dropped packets of all UPD sockets.
Drops *uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
@ -66,6 +69,7 @@ type (
RxQueue uint64
UID uint64
Inode uint64
Drops *uint64
}
)
@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
defer f.Close()
var netIPSocket NetIPSocket
isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
var udpPacketDrops uint64
isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
if isUDP {
udpPacketDrops += *line.Drops
netIPSocketSummary.Drops = &udpPacketDrops
}
}
if err := s.Err(); err != nil {
return nil, err
@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
}
switch len(byteIP) {
case 4:
@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) {
}
return i, nil
default:
return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
}
// local_address
l := strings.Split(fields[1], ":")
@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
}
// remote_address
@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
}
// tx_queue and rx_queue
@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
}
// drops
if isUDP {
drops, err := strconv.ParseUint(fields[12], 0, 64)
if err != nil {
return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
}
line.Drops = &drops
}
return line, nil
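
UDP socket lines now carry the drops column (the last field of /proc/net/udp, fields[12] here), exposed as a *uint64 on both netIPSocketLine and NetIPSocketSummary and left nil for TCP. A sketch reusing fs; NetUDPSummary is assumed from the package's existing net_udp helpers, not shown in this hunk:

sum, err := fs.NetUDPSummary()
if err != nil {
    log.Fatal(err)
}
fmt.Printf("udp sockets in use: %d\n", sum.UsedSockets)
if sum.Drops != nil { // nil for the TCP variants, set for udp and udp6
    fmt.Printf("udp packet drops: %d\n", *sum.Drops)
}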

View File

@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
}
return stat, nil
@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.

View File

@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
}
return entries, nil

vendor/github.com/prometheus/procfs/net_tls_stat.go (generated, vendored, new file; 119 lines)
View File

@ -0,0 +1,119 @@
// Copyright 2023 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// TLSStat struct represents data in /proc/net/tls_stat.
// See https://docs.kernel.org/networking/tls.html#statistics
type TLSStat struct {
// number of TX sessions currently installed where host handles cryptography
TLSCurrTxSw int
// number of RX sessions currently installed where host handles cryptography
TLSCurrRxSw int
// number of TX sessions currently installed where NIC handles cryptography
TLSCurrTxDevice int
// number of RX sessions currently installed where NIC handles cryptography
TLSCurrRxDevice int
//number of TX sessions opened with host cryptography
TLSTxSw int
//number of RX sessions opened with host cryptography
TLSRxSw int
// number of TX sessions opened with NIC cryptography
TLSTxDevice int
// number of RX sessions opened with NIC cryptography
TLSRxDevice int
// record decryption failed (e.g. due to incorrect authentication tag)
TLSDecryptError int
// number of RX resyncs sent to NICs handling cryptography
TLSRxDeviceResync int
// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
TLSDecryptRetry int
// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
TLSRxNoPadViolation int
}
// NewTLSStat reads the tls_stat statistics.
func NewTLSStat() (TLSStat, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return TLSStat{}, err
}
return fs.NewTLSStat()
}
// NewTLSStat reads the tls_stat statistics.
func (fs FS) NewTLSStat() (TLSStat, error) {
file, err := os.Open(fs.proc.Path("net/tls_stat"))
if err != nil {
return TLSStat{}, err
}
defer file.Close()
var (
tlsstat = TLSStat{}
s = bufio.NewScanner(file)
)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
}
name := fields[0]
value, err := strconv.Atoi(fields[1])
if err != nil {
return TLSStat{}, err
}
switch name {
case "TlsCurrTxSw":
tlsstat.TLSCurrTxSw = value
case "TlsCurrRxSw":
tlsstat.TLSCurrRxSw = value
case "TlsCurrTxDevice":
tlsstat.TLSCurrTxDevice = value
case "TlsCurrRxDevice":
tlsstat.TLSCurrRxDevice = value
case "TlsTxSw":
tlsstat.TLSTxSw = value
case "TlsRxSw":
tlsstat.TLSRxSw = value
case "TlsTxDevice":
tlsstat.TLSTxDevice = value
case "TlsRxDevice":
tlsstat.TLSRxDevice = value
case "TlsDecryptError":
tlsstat.TLSDecryptError = value
case "TlsRxDeviceResync":
tlsstat.TLSRxDeviceResync = value
case "TlsDecryptRetry":
tlsstat.TLSDecryptRetry = value
case "TlsRxNoPadViolation":
tlsstat.TLSRxNoPadViolation = value
}
}
return tlsstat, s.Err()
}
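
net_tls_stat.go is a new file; NewTLSStat above parses /proc/net/tls_stat, which is only present when the kernel's TLS (kTLS) support is in use. A minimal sketch of calling it, reusing fs from the first example:

tls, err := fs.NewTLSStat()
if err != nil {
    // /proc/net/tls_stat may simply not exist, so treat these stats as optional.
    log.Printf("kTLS stats unavailable: %v", err)
} else {
    fmt.Printf("kTLS decrypt errors: %d, RX device resyncs: %d\n",
        tls.TLSDecryptError, tls.TLSRxDeviceResync)
}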

View File

@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
}
return &nu, nil
@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
}
}

View File

@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
}
return m, nil
@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
}
w := &Wireless{
@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
}
return interfaces, nil

View File

@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
p := Procs{}
@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
}
// Wchan returns the wchan (wait channel) of a process.
@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
}
fds[i] = uintptr(fd)
}
@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
return names, nil

View File

@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
}
return i, nil
}

View File

@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
}
ns := make(Namespaces, len(names))
@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}

View File

@ -61,7 +61,7 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
}
return parsePSIStats(bytes.NewReader(data))

View File

@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
v := strings.TrimSpace(kv[1])
v = strings.TrimRight(v, " kB")
v = strings.TrimSuffix(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {

View File

@ -110,6 +110,11 @@ type ProcStat struct {
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
// Guest time of the process (time spent running a virtual CPU for a guest
// operating system), measured in clock ticks.
GuestTime int
// Guest time of the process's children, measured in clock ticks.
CGuestTime int
proc FS
}
@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
&s.GuestTime,
&s.CGuestTime,
)
if err != nil {
return ProcStat{}, err

View File

@ -15,6 +15,7 @@ package procfs
import (
"bytes"
"math/bits"
"sort"
"strconv"
"strings"
@ -76,9 +77,9 @@ type ProcStatus struct {
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
UIDs [4]string
UIDs [4]uint64
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
GIDs [4]uint64
// CpusAllowedList: List of cpu cores processes are allowed to run on.
CpusAllowedList []uint64
@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) {
// convert kB to B
vBytes := vKBytes * 1024
s.fillStatus(k, v, vKBytes, vBytes)
err = s.fillStatus(k, v, vKBytes, vBytes)
if err != nil {
return ProcStatus{}, err
}
}
return s, nil
}
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t"))
var err error
for i, v := range strings.Split(vString, "\t") {
s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
if err != nil {
return err
}
}
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
var err error
for i, v := range strings.Split(vString, "\t") {
s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
if err != nil {
return err
}
}
case "NSpid":
s.NSpids = calcNSPidsList(vString)
case "VmPeak":
@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.CpusAllowedList = calcCpusAllowedList(vString)
}
return nil
}
// TotalCtxtSwitches returns the total context switch.

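ProcStatus.UIDs and GIDs change type from [4]string to [4]uint64, a breaking change for callers that compared them as strings; together with the new GuestTime/CGuestTime fields on ProcStat two hunks up, per-process consumers may need small adjustments. A sketch of the updated caller side, reusing fs; fs.Self is assumed from the package API and resolves /proc/self:

proc, err := fs.Self()
if err != nil {
    log.Fatal(err)
}
status, err := proc.NewStatus()
if err != nil {
    log.Fatal(err)
}
// Real, effective, saved-set and filesystem IDs are now numeric.
fmt.Printf("ruid=%d euid=%d\n", status.UIDs[0], status.UIDs[1])

stat, err := proc.Stat()
if err != nil {
    log.Fatal(err)
}
fmt.Printf("guest time: %d ticks (children: %d ticks)\n", stat.GuestTime, stat.CGuestTime)
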
View File

@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
vp := util.NewValueParser(f)
values[i] = vp.Int()
if err := vp.Err(); err != nil {
return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
}
}
return values, nil

View File

@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TIMER:":
@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_TX:":
@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_RX:":
@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "BLOCK:":
@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "IRQ_POLL:":
@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TASKLET:":
@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "SCHED:":
@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "HRTIMER:":
@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "RCU:":
@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
}
}
}
}
if err := scanner.Err(); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
}
return softirqs, scanner.Err()

View File

@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
}
if count == 0 {
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
}
return cpuStat, cpuID, nil
@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
}
return softIRQStat, total, nil
@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
}
if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
}
return stat, nil

View File

@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
}
return swap, nil

View File

@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
}
t := Procs{}

View File

@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}

vendor/modules.txt (vendored; 4 changed lines)
View File

@ -247,8 +247,8 @@ github.com/prometheus/client_model/go
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.12.0
## explicit; go 1.19
# github.com/prometheus/procfs v0.15.1
## explicit; go 1.20
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util