vendor: github.com/prometheus/procfs v0.7.3

Un-pinning the dependency to let Go modules resolve the version to use.

full diff: https://github.com/prometheus/procfs/compare/v0.0.11...v0.7.3

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2022-03-24 13:46:31 +01:00
parent 29f799aae7
commit 7408799ec3
GPG Key ID: 76698F39D527CE8C
57 changed files with 4685 additions and 457 deletions


@@ -50,5 +50,4 @@ replace (
	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible // master (v21.xx-dev)
	github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
	github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.6.0
-	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.11
)


@@ -780,8 +780,15 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=

vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md generated vendored Normal file

@@ -0,0 +1,3 @@
## Prometheus Community Code of Conduct
Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).


@@ -18,6 +18,8 @@ include Makefile.common
	./ttar -C $(dir $*) -x -f $*.ttar
	touch $@

fixtures: fixtures/.unpacked

update_fixtures:
	rm -vf fixtures/.unpacked
	./ttar -c -f fixtures.ttar fixtures/


@@ -78,12 +78,12 @@ ifneq ($(shell which gotestsum),)
endif
endif

-PROMU_VERSION ?= 0.5.0
PROMU_VERSION ?= 0.12.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.18.0
GOLANGCI_LINT_VERSION ?= v1.39.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

@@ -118,7 +118,7 @@ endif
%: common-% ;

.PHONY: common-all
-common-all: precheck style check_license lint unused build test
common-all: precheck style check_license lint yamllint unused build test

.PHONY: common-style
common-style:

@@ -150,6 +150,17 @@ else
	$(GO) get $(GOOPTS) -t ./...
endif

.PHONY: update-go-deps
update-go-deps:
	@echo ">> updating Go dependencies"
	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
		$(GO) get $$m; \
	done
	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
ifneq (,$(wildcard vendor))
	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
endif

.PHONY: common-test-short
common-test-short: $(GOTEST_DIR)
	@echo ">> running short tests"

@@ -187,6 +198,15 @@ else
endif
endif

.PHONY: common-yamllint
common-yamllint:
	@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell which yamllint))
	@echo "yamllint not installed so skipping"
else
	yamllint .
endif

# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint

@@ -234,10 +254,12 @@ common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"

DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))

.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"

.PHONY: common-docker-manifest
common-docker-manifest:


@@ -6,8 +6,8 @@ metrics from the pseudo-filesystems /proc and /sys.
*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.

-[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
-[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)

## Usage

vendor/github.com/prometheus/procfs/SECURITY.md generated vendored Normal file

@@ -0,0 +1,6 @@
# Reporting a security issue
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
https://prometheus.io/docs/operating/security/


@@ -36,7 +36,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
	if err != nil {
-		return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
		return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
	}

	return parseARPEntries(data)

@@ -59,7 +59,7 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
		} else if width == expectedDataWidth {
			entry, err := parseARPEntry(columns)
			if err != nil {
-				return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
				return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
			}
			entries = append(entries, entry)
		} else {


@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
		for i := 0; i < arraySize; i++ {
			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
			if err != nil {
-				return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
				return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
			}
		}

vendor/github.com/prometheus/procfs/cmdline.go generated vendored Normal file

@@ -0,0 +1,30 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"strings"
"github.com/prometheus/procfs/internal/util"
)
// CmdLine returns the command line of the kernel.
func (fs FS) CmdLine() ([]string, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
if err != nil {
return nil, err
}
return strings.Fields(string(data)), nil
}
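
As a minimal usage sketch (not part of the upstream diff), the new CmdLine helper can be exercised like this, assuming the default /proc mount point:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // NewFS takes the procfs mount point; "/proc" is the usual default.
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    // CmdLine returns the kernel boot parameters read from /proc/cmdline.
    args, err := fs.CmdLine()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(args)
}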


@@ -11,11 +11,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"

@@ -52,6 +57,11 @@ type CPUInfo struct {
	PowerManagement string
}

var (
	cpuinfoClockRegexp          = regexp.MustCompile(`([\d.]+)`)
	cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
)

// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {

@@ -62,14 +72,26 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) {
	return parseCPUInfo(data)
}

-// parseCPUInfo parses data from /proc/cpuinfo
-func parseCPUInfo(info []byte) ([]CPUInfo, error) {
-	cpuinfo := []CPUInfo{}
-	i := -1
func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
	scanner := bufio.NewScanner(bytes.NewReader(info))

	// find the first "processor" line
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	v, err := strconv.ParseUint(field[1], 0, 32)
	if err != nil {
		return nil, err
	}
	firstcpu := CPUInfo{Processor: uint(v)}
	cpuinfo := []CPUInfo{firstcpu}
	i := 0

	for scanner.Scan() {
		line := scanner.Text()
-		if strings.TrimSpace(line) == "" {
		if !strings.Contains(line, ":") {
			continue
		}
		field := strings.SplitN(line, ": ", 2)

@@ -82,7 +104,7 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
				return nil, err
			}
			cpuinfo[i].Processor = uint(v)
-		case "vendor_id":
		case "vendor", "vendor_id":
			cpuinfo[i].VendorID = field[1]
		case "cpu family":
			cpuinfo[i].CPUFamily = field[1]

@@ -163,5 +185,297 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
		}
	}
	return cpuinfo, nil
}
func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
featuresLine := ""
commonCPUInfo := CPUInfo{}
i := 0
if strings.TrimSpace(field[0]) == "Processor" {
commonCPUInfo = CPUInfo{ModelName: field[1]}
i = -1
} else {
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo = []CPUInfo{firstcpu}
}
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "BogoMIPS":
if i == -1 {
cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
i++
cpuinfo[i].Processor = 0
}
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "Features":
featuresLine = line
case "model name":
cpuinfo[i].ModelName = field[1]
}
}
fields := strings.SplitN(featuresLine, ": ", 2)
for i := range cpuinfo {
cpuinfo[i].Flags = strings.Fields(fields[1])
}
return cpuinfo, nil
}
func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
commonCPUInfo := CPUInfo{VendorID: field[1]}
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "bogomips per cpu":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
commonCPUInfo.BogoMips = v
case "features":
commonCPUInfo.Flags = strings.Fields(field[1])
}
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
if err != nil {
return nil, err
}
cpu.Processor = uint(v)
cpuinfo = append(cpuinfo, cpu)
}
if strings.HasPrefix(line, "cpu number") {
break
}
}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "cpu number":
i++
case "cpu MHz dynamic":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
}
}
return cpuinfo, nil
}
func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
systemType := field[1]
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
cpuinfo[i].VendorID = systemType
case "cpu model":
cpuinfo[i].ModelName = field[1]
case "BogoMIPS":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
}
}
return cpuinfo, nil
}
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "cpu":
cpuinfo[i].VendorID = field[1]
case "clock":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
}
}
return cpuinfo, nil
}
func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
case "hart":
cpuinfo[i].CoreID = field[1]
case "isa":
cpuinfo[i].ModelName = field[1]
}
}
return cpuinfo, nil
}
func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
return nil, errors.New("not implemented")
}
// firstNonEmptyLine advances the scanner to the first non-empty line
// and returns the contents of that line
func firstNonEmptyLine(scanner *bufio.Scanner) string {
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) != "" {
return line
}
}
return ""
}
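
For orientation, a minimal sketch (not part of the upstream diff) of how these per-architecture parsers surface to callers: the build-tagged files below bind parseCPUInfo to the matching implementation, and fs.CPUInfo() is the only public entry point. Assuming a standard /proc mount:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    // CPUInfo reads /proc/cpuinfo and dispatches to the parser
    // selected at build time for the current GOARCH.
    infos, err := fs.CPUInfo()
    if err != nil {
        log.Fatal(err)
    }
    for _, cpu := range infos {
        fmt.Println(cpu.Processor, cpu.VendorID, cpu.ModelName)
    }
}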

vendor/github.com/prometheus/procfs/cpuinfo_armx.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build arm arm64
package procfs
var parseCPUInfo = parseCPUInfoARM

vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build mips mipsle mips64 mips64le
package procfs
var parseCPUInfo = parseCPUInfoMips

vendor/github.com/prometheus/procfs/cpuinfo_others.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
var parseCPUInfo = parseCPUInfoDummy

vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build ppc64 ppc64le
package procfs
var parseCPUInfo = parseCPUInfoPPC

vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build riscv riscv64
package procfs
var parseCPUInfo = parseCPUInfoRISCV

vendor/github.com/prometheus/procfs/cpuinfo_s390x.go generated vendored Normal file

@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoS390X

vendor/github.com/prometheus/procfs/cpuinfo_x86.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build 386 amd64
package procfs
var parseCPUInfo = parseCPUInfoX86


@@ -55,12 +55,12 @@ func (fs FS) Crypto() ([]Crypto, error) {
	path := fs.proc.Path("crypto")
	b, err := util.ReadFileNoStat(path)
	if err != nil {
-		return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
		return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
	}

	crypto, err := parseCrypto(bytes.NewReader(b))
	if err != nil {
-		return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
		return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
	}

	return crypto, nil


@@ -31,7 +31,7 @@
//     log.Fatalf("could not get process: %s", err)
// }
//
-//     stat, err := p.NewStat()
//     stat, err := p.Stat()
// if err != nil {
//     log.Fatalf("could not get process stat: %s", err)
// }

File diff suppressed because it is too large

vendor/github.com/prometheus/procfs/fscache.go generated vendored Normal file

@@ -0,0 +1,422 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Fscacheinfo represents fscache statistics.
type Fscacheinfo struct {
// Number of index cookies allocated
IndexCookiesAllocated uint64
// data storage cookies allocated
DataStorageCookiesAllocated uint64
// Number of special cookies allocated
SpecialCookiesAllocated uint64
// Number of objects allocated
ObjectsAllocated uint64
// Number of object allocation failures
ObjectAllocationsFailure uint64
// Number of objects that reached the available state
ObjectsAvailable uint64
// Number of objects that reached the dead state
ObjectsDead uint64
// Number of objects that didn't have a coherency check
ObjectsWithoutCoherencyCheck uint64
// Number of objects that passed a coherency check
ObjectsWithCoherencyCheck uint64
// Number of objects that needed a coherency data update
ObjectsNeedCoherencyCheckUpdate uint64
// Number of objects that were declared obsolete
ObjectsDeclaredObsolete uint64
// Number of pages marked as being cached
PagesMarkedAsBeingCached uint64
// Number of uncache page requests seen
UncachePagesRequestSeen uint64
// Number of acquire cookie requests seen
AcquireCookiesRequestSeen uint64
// Number of acq reqs given a NULL parent
AcquireRequestsWithNullParent uint64
// Number of acq reqs rejected due to no cache available
AcquireRequestsRejectedNoCacheAvailable uint64
// Number of acq reqs succeeded
AcquireRequestsSucceeded uint64
// Number of acq reqs rejected due to error
AcquireRequestsRejectedDueToError uint64
// Number of acq reqs failed on ENOMEM
AcquireRequestsFailedDueToEnomem uint64
// Number of lookup calls made on cache backends
LookupsNumber uint64
// Number of negative lookups made
LookupsNegative uint64
// Number of positive lookups made
LookupsPositive uint64
// Number of objects created by lookup
ObjectsCreatedByLookup uint64
// Number of lookups timed out and requeued
LookupsTimedOutAndRequed uint64
InvalidationsNumber uint64
InvalidationsRunning uint64
// Number of update cookie requests seen
UpdateCookieRequestSeen uint64
// Number of upd reqs given a NULL parent
UpdateRequestsWithNullParent uint64
// Number of upd reqs granted CPU time
UpdateRequestsRunning uint64
// Number of relinquish cookie requests seen
RelinquishCookiesRequestSeen uint64
// Number of rlq reqs given a NULL parent
RelinquishCookiesWithNullParent uint64
// Number of rlq reqs waited on completion of creation
RelinquishRequestsWaitingCompleteCreation uint64
// Relinqs rtr
RelinquishRetries uint64
// Number of attribute changed requests seen
AttributeChangedRequestsSeen uint64
// Number of attr changed requests queued
AttributeChangedRequestsQueued uint64
// Number of attr changed rejected -ENOBUFS
AttributeChangedRejectDueToEnobufs uint64
// Number of attr changed failed -ENOMEM
AttributeChangedFailedDueToEnomem uint64
// Number of attr changed ops given CPU time
AttributeChangedOps uint64
// Number of allocation requests seen
AllocationRequestsSeen uint64
// Number of successful alloc reqs
AllocationOkRequests uint64
// Number of alloc reqs that waited on lookup completion
AllocationWaitingOnLookup uint64
// Number of alloc reqs rejected -ENOBUFS
AllocationsRejectedDueToEnobufs uint64
// Number of alloc reqs aborted -ERESTARTSYS
AllocationsAbortedDueToErestartsys uint64
// Number of alloc reqs submitted
AllocationOperationsSubmitted uint64
// Number of alloc reqs waited for CPU time
AllocationsWaitedForCPU uint64
// Number of alloc reqs aborted due to object death
AllocationsAbortedDueToObjectDeath uint64
// Number of retrieval (read) requests seen
RetrievalsReadRequests uint64
// Number of successful retr reqs
RetrievalsOk uint64
// Number of retr reqs that waited on lookup completion
RetrievalsWaitingLookupCompletion uint64
// Number of retr reqs returned -ENODATA
RetrievalsReturnedEnodata uint64
// Number of retr reqs rejected -ENOBUFS
RetrievalsRejectedDueToEnobufs uint64
// Number of retr reqs aborted -ERESTARTSYS
RetrievalsAbortedDueToErestartsys uint64
// Number of retr reqs failed -ENOMEM
RetrievalsFailedDueToEnomem uint64
// Number of retr reqs submitted
RetrievalsRequests uint64
// Number of retr reqs waited for CPU time
RetrievalsWaitingCPU uint64
// Number of retr reqs aborted due to object death
RetrievalsAbortedDueToObjectDeath uint64
// Number of storage (write) requests seen
StoreWriteRequests uint64
// Number of successful store reqs
StoreSuccessfulRequests uint64
// Number of store reqs on a page already pending storage
StoreRequestsOnPendingStorage uint64
// Number of store reqs rejected -ENOBUFS
StoreRequestsRejectedDueToEnobufs uint64
// Number of store reqs failed -ENOMEM
StoreRequestsFailedDueToEnomem uint64
// Number of store reqs submitted
StoreRequestsSubmitted uint64
// Number of store reqs granted CPU time
StoreRequestsRunning uint64
// Number of pages given store req processing time
StorePagesWithRequestsProcessing uint64
// Number of store reqs deleted from tracking tree
StoreRequestsDeleted uint64
// Number of store reqs over store limit
StoreRequestsOverStoreLimit uint64
// Number of release reqs against pages with no pending store
ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
// Number of release reqs against pages stored by time lock granted
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
// Number of page stores cancelled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
OpsPending uint64
// Number of times async ops given CPU time
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
// Number of async ops cancelled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
// Number of async ops initialised
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
// Number of async ops released (should equal ini=N when idle)
OpsReleased uint64
// Number of deferred-release async ops garbage collected
OpsGarbageCollected uint64
// Number of in-progress alloc_object() cache ops
CacheopAllocationsinProgress uint64
// Number of in-progress lookup_object() cache ops
CacheopLookupObjectInProgress uint64
// Number of in-progress lookup_complete() cache ops
CacheopLookupCompleteInPorgress uint64
// Number of in-progress grab_object() cache ops
CacheopGrabObjectInProgress uint64
CacheopInvalidations uint64
// Number of in-progress update_object() cache ops
CacheopUpdateObjectInProgress uint64
// Number of in-progress drop_object() cache ops
CacheopDropObjectInProgress uint64
// Number of in-progress put_object() cache ops
CacheopPutObjectInProgress uint64
// Number of in-progress attr_changed() cache ops
CacheopAttributeChangeInProgress uint64
// Number of in-progress sync_cache() cache ops
CacheopSyncCacheInProgress uint64
// Number of in-progress read_or_alloc_page() cache ops
CacheopReadOrAllocPageInProgress uint64
// Number of in-progress read_or_alloc_pages() cache ops
CacheopReadOrAllocPagesInProgress uint64
// Number of in-progress allocate_page() cache ops
CacheopAllocatePageInProgress uint64
// Number of in-progress allocate_pages() cache ops
CacheopAllocatePagesInProgress uint64
// Number of in-progress write_page() cache ops
CacheopWritePagesInProgress uint64
// Number of in-progress uncache_page() cache ops
CacheopUncachePagesInProgress uint64
// Number of in-progress dissociate_pages() cache ops
CacheopDissociatePagesInProgress uint64
// Number of object lookups/creations rejected due to lack of space
CacheevLookupsAndCreationsRejectedLackSpace uint64
// Number of stale objects deleted
CacheevStaleObjectsDeleted uint64
// Number of objects retired when relinquished
CacheevRetiredWhenReliquished uint64
// Number of objects culled
CacheevObjectsCulled uint64
}
// Fscacheinfo returns information about current fscache statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
if err != nil {
return Fscacheinfo{}, err
}
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
}
return *m, nil
}
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
}
for i := range setFields {
*setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
if err != nil {
return err
}
}
return nil
}
func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
var m Fscacheinfo
s := bufio.NewScanner(r)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
}
switch fields[0] {
case "Cookies:":
err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
&m.SpecialCookiesAllocated)
if err != nil {
return &m, err
}
case "Objects:":
err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
&m.ObjectsAvailable, &m.ObjectsDead)
if err != nil {
return &m, err
}
case "ChkAux":
err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
&m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
if err != nil {
return &m, err
}
case "Pages":
err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
if err != nil {
return &m, err
}
case "Acquire:":
err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
&m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
&m.AcquireRequestsFailedDueToEnomem)
if err != nil {
return &m, err
}
case "Lookups:":
err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
&m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
if err != nil {
return &m, err
}
case "Invals":
err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
if err != nil {
return &m, err
}
case "Updates:":
err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
&m.UpdateRequestsRunning)
if err != nil {
return &m, err
}
case "Relinqs:":
err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
&m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
if err != nil {
return &m, err
}
case "AttrChg:":
err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
&m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
if err != nil {
return &m, err
}
case "Allocs":
if strings.Split(fields[2], "=")[0] == "n" {
err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
&m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
&m.AllocationsAbortedDueToObjectDeath)
if err != nil {
return &m, err
}
}
case "Retrvls:":
if strings.Split(fields[1], "=")[0] == "n" {
err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
&m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
&m.RetrievalsFailedDueToEnomem)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
if err != nil {
return &m, err
}
}
case "Stores":
if strings.Split(fields[2], "=")[0] == "n" {
err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
&m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
&m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
if err != nil {
return &m, err
}
}
case "VmScan":
err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
&m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
if err != nil {
return &m, err
}
case "Ops":
if strings.Split(fields[2], "=")[0] == "pend" {
err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
if err != nil {
return &m, err
}
}
case "CacheOp:":
if strings.Split(fields[1], "=")[0] == "alo" {
err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
if err != nil {
return &m, err
}
} else if strings.Split(fields[1], "=")[0] == "inv" {
err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
&m.CacheopSyncCacheInProgress)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
if err != nil {
return &m, err
}
}
case "CacheEv:":
err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
if err != nil {
return &m, err
}
}
}
return &m, nil
}
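
For context, a minimal usage sketch (not part of the upstream diff) of the new Fscacheinfo API, with field names as declared in the struct above, assuming a standard /proc mount and a kernel with fscache statistics enabled:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    // Fscacheinfo parses /proc/fs/fscache/stats into the struct above.
    info, err := fs.Fscacheinfo()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
    fmt.Println("objects allocated:", info.ObjectsAllocated)
}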


@@ -1,9 +1,9 @@
module github.com/prometheus/procfs

-go 1.12
go 1.13

require (
-	github.com/google/go-cmp v0.3.1
-	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-	golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
	github.com/google/go-cmp v0.5.4
	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)


@@ -1,6 +1,8 @@
-github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@@ -39,10 +39,10 @@ type FS string
func NewFS(mountPoint string) (FS, error) {
	info, err := os.Stat(mountPoint)
	if err != nil {
-		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
		return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
	}
	if !info.IsDir() {
-		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
		return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
	}

	return FS(mountPoint), nil


@@ -73,6 +73,15 @@ func ReadUintFromFile(path string) (uint64, error) {
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

// ReadIntFromFile reads a file and attempts to parse a int64 from it.
func ReadIntFromFile(path string) (int64, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
}

// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
	var truth bool

vendor/github.com/prometheus/procfs/kernel_random.go generated vendored Normal file

@@ -0,0 +1,62 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"os"
"github.com/prometheus/procfs/internal/util"
)
// KernelRandom contains information about to the kernel's random number generator.
type KernelRandom struct {
// EntropyAvaliable gives the available entropy, in bits.
EntropyAvaliable *uint64
// PoolSize gives the size of the entropy pool, in bits.
PoolSize *uint64
// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
URandomMinReseedSeconds *uint64
// WriteWakeupThreshold the number of bits of entropy below which we wake up processes
// that do a select(2) or poll(2) for write access to /dev/random.
WriteWakeupThreshold *uint64
// ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
// waiting for entropy from /dev/random.
ReadWakeupThreshold *uint64
}
// KernelRandom returns values from /proc/sys/kernel/random.
func (fs FS) KernelRandom() (KernelRandom, error) {
random := KernelRandom{}
for file, p := range map[string]**uint64{
"entropy_avail": &random.EntropyAvaliable,
"poolsize": &random.PoolSize,
"urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
"write_wakeup_threshold": &random.WriteWakeupThreshold,
"read_wakeup_threshold": &random.ReadWakeupThreshold,
} {
val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
if os.IsNotExist(err) {
continue
}
if err != nil {
return random, err
}
*p = &val
}
return random, nil
}
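
For context, a minimal usage sketch (not part of the upstream diff); note the fields are *uint64 and stay nil when the corresponding file under /proc/sys/kernel/random does not exist:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    random, err := fs.KernelRandom()
    if err != nil {
        log.Fatal(err)
    }
    // Field name (including its spelling) is as declared in the vendored source.
    if random.EntropyAvaliable != nil {
        fmt.Println("entropy available:", *random.EntropyAvaliable)
    }
}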


@@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
	loads := make([]float64, 3)
	parts := strings.Fields(string(loadavgBytes))
	if len(parts) < 3 {
-		return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
		return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
	}

	var err error
	for i, load := range parts[0:3] {
		loads[i], err = strconv.ParseFloat(load, 64)
		if err != nil {
-			return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
			return nil, fmt.Errorf("could not parse load %q: %w", load, err)
		}
	}
	return &LoadAvg{


@@ -22,8 +22,12 @@ import (
)

var (
-	statusLineRE   = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
-	recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
	statusLineRE         = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
	recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
	recoveryLinePctRE    = regexp.MustCompile(`= (.+)%`)
	recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
	recoveryLineSpeedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
	componentDeviceRE    = regexp.MustCompile(`(.*)\[\d+\]`)
)

// MDStat holds info parsed from /proc/mdstat.
@@ -38,12 +38,22 @@ type MDStat struct {
	DisksTotal int64
	// Number of failed disks.
	DisksFailed int64
	// Number of "down" disks. (the _ indicator in the status line)
	DisksDown int64
	// Spare disks in the device.
	DisksSpare int64
	// Number of blocks the device holds.
	BlocksTotal int64
	// Number of blocks on the device that are in sync.
	BlocksSynced int64
	// progress percentage of current sync
	BlocksSyncedPct float64
	// estimated finishing time for current sync (in minutes)
	BlocksSyncedFinishTime float64
	// current sync speed (in Kilobytes/sec)
	BlocksSyncedSpeed float64
	// Name of md component devices
	Devices []string
}

// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
@@ -52,11 +66,11 @@ type MDStat struct {
func (fs FS) MDStat() ([]MDStat, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
	if err != nil {
-		return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
		return nil, err
	}
	mdstat, err := parseMDStat(data)
	if err != nil {
-		return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
		return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
	}
	return mdstat, nil
}
@@ -82,19 +96,16 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
		state := deviceFields[2] // active or inactive

		if len(lines) <= i+3 {
-			return nil, fmt.Errorf(
-				"error parsing %s: too few lines for md device",
-				mdName,
-			)
			return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
		}

		// Failed disks have the suffix (F) & Spare disks have the suffix (S).
		fail := int64(strings.Count(line, "(F)"))
		spare := int64(strings.Count(line, "(S)"))
-		active, total, size, err := evalStatusLine(lines[i], lines[i+1])
		active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
		if err != nil {
-			return nil, fmt.Errorf("error parsing md device lines: %s", err)
			return nil, fmt.Errorf("error parsing md device lines: %w", err)
		}

		syncLineIdx := i + 2
@@ -105,13 +116,19 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
		// If device is syncing at the moment, get the number of currently
		// synced bytes, otherwise that number equals the size of the device.
		syncedBlocks := size
		speed := float64(0)
		finish := float64(0)
		pct := float64(0)
		recovering := strings.Contains(lines[syncLineIdx], "recovery")
		resyncing := strings.Contains(lines[syncLineIdx], "resync")
		checking := strings.Contains(lines[syncLineIdx], "check")

		// Append recovery and resyncing state info.
-		if recovering || resyncing {
		if recovering || resyncing || checking {
			if recovering {
				state = "recovering"
			} else if checking {
				state = "checking"
			} else {
				state = "resyncing"
			}
@@ -121,74 +138,125 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
				strings.Contains(lines[syncLineIdx], "DELAYED") {
				syncedBlocks = 0
			} else {
-				syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
				syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
				if err != nil {
-					return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
					return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
				}
			}
		}

		mdStats = append(mdStats, MDStat{
			Name:                   mdName,
			ActivityState:          state,
			DisksActive:            active,
			DisksFailed:            fail,
			DisksDown:              down,
			DisksSpare:             spare,
			DisksTotal:             total,
			BlocksTotal:            size,
			BlocksSynced:           syncedBlocks,
			BlocksSyncedPct:        pct,
			BlocksSyncedFinishTime: finish,
			BlocksSyncedSpeed:      speed,
			Devices:                evalComponentDevices(deviceFields),
		})
	}

	return mdStats, nil
}

-func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
	sizeStr := strings.Fields(statusLine)[0]
	size, err = strconv.ParseInt(sizeStr, 10, 64)
	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
	}

	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
		// In the device deviceLine, only disks have a number associated with them in [].
		total = int64(strings.Count(deviceLine, "["))
-		return total, total, size, nil
		return total, total, 0, size, nil
	}

	if strings.Contains(deviceLine, "inactive") {
-		return 0, 0, size, nil
		return 0, 0, 0, size, nil
	}

	matches := statusLineRE.FindStringSubmatch(statusLine)
-	if len(matches) != 4 {
	if len(matches) != 5 {
-		return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
		return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
	}

	total, err = strconv.ParseInt(matches[2], 10, 64)
	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
	}

	active, err = strconv.ParseInt(matches[3], 10, 64)
	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
	}
	down = int64(strings.Count(matches[4], "_"))

	return active, total, down, size, nil
}

-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
-	matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
	matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
	if len(matches) != 2 {
-		return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
		return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
	}

	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
	if err != nil {
-		return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
		return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
	}

-	return syncedBlocks, nil
	// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {
mdComponentDevices := make([]string, 0)
if len(deviceFields) > 3 {
for _, field := range deviceFields[4:] {
match := componentDeviceRE.FindStringSubmatch(field)
if match == nil {
continue
}
mdComponentDevices = append(mdComponentDevices, match[1])
}
}
return mdComponentDevices
} }
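The new evalRecoveryLine pulls four values out of a single /proc/mdstat progress line. Below is a minimal self-contained sketch of that extraction; the regex shapes are illustrative assumptions, since the vendored recoveryLineBlocksRE, recoveryLinePctRE, recoveryLineFinishRE, and recoveryLineSpeedRE definitions do not appear in this diff:

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// Illustrative regexes; the vendored recoveryLine*RE definitions may differ.
var (
	blocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
	pctRE    = regexp.MustCompile(`= (.+)%`)
	finishRE = regexp.MustCompile(`finish=(.+)min`)
	speedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
)

func main() {
	// A typical mdstat recovery line; the real parser checks each
	// FindStringSubmatch result before indexing, as shown in the diff.
	line := "      [==>..................]  recovery = 12.6% (2823168/22412032) finish=2.8min speed=114176K/sec"

	blocks, _ := strconv.ParseInt(blocksRE.FindStringSubmatch(line)[1], 10, 64)
	pct, _ := strconv.ParseFloat(strings.TrimSpace(pctRE.FindStringSubmatch(line)[1]), 64)
	finish, _ := strconv.ParseFloat(finishRE.FindStringSubmatch(line)[1], 64)
	speed, _ := strconv.ParseFloat(speedRE.FindStringSubmatch(line)[1], 64)

	fmt.Println(blocks, pct, finish, speed) // 2823168 12.6 2.8 114176
}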

vendor/github.com/prometheus/procfs/meminfo.go

@@ -28,9 +28,9 @@ import (
 type Meminfo struct {
 	// Total usable ram (i.e. physical ram minus a few reserved
 	// bits and the kernel binary code)
-	MemTotal uint64
+	MemTotal *uint64
 	// The sum of LowFree+HighFree
-	MemFree uint64
+	MemFree *uint64
 	// An estimate of how much memory is available for starting
 	// new applications, without swapping. Calculated from
 	// MemFree, SReclaimable, the size of the file LRU lists, and
@@ -39,59 +39,59 @@ type Meminfo struct {
 	// well, and that not all reclaimable slab will be
 	// reclaimable, due to items being in use. The impact of those
 	// factors will vary from system to system.
-	MemAvailable uint64
+	MemAvailable *uint64
 	// Relatively temporary storage for raw disk blocks shouldn't
 	// get tremendously large (20MB or so)
-	Buffers uint64
-	Cached  uint64
+	Buffers *uint64
+	Cached  *uint64
 	// Memory that once was swapped out, is swapped back in but
 	// still also is in the swapfile (if memory is needed it
 	// doesn't need to be swapped out AGAIN because it is already
 	// in the swapfile. This saves I/O)
-	SwapCached uint64
+	SwapCached *uint64
 	// Memory that has been used more recently and usually not
 	// reclaimed unless absolutely necessary.
-	Active uint64
+	Active *uint64
 	// Memory which has been less recently used. It is more
 	// eligible to be reclaimed for other purposes
-	Inactive     uint64
-	ActiveAnon   uint64
-	InactiveAnon uint64
-	ActiveFile   uint64
-	InactiveFile uint64
-	Unevictable  uint64
-	Mlocked      uint64
+	Inactive     *uint64
+	ActiveAnon   *uint64
+	InactiveAnon *uint64
+	ActiveFile   *uint64
+	InactiveFile *uint64
+	Unevictable  *uint64
+	Mlocked      *uint64
 	// total amount of swap space available
-	SwapTotal uint64
+	SwapTotal *uint64
 	// Memory which has been evicted from RAM, and is temporarily
 	// on the disk
-	SwapFree uint64
+	SwapFree *uint64
 	// Memory which is waiting to get written back to the disk
-	Dirty uint64
+	Dirty *uint64
 	// Memory which is actively being written back to the disk
-	Writeback uint64
+	Writeback *uint64
 	// Non-file backed pages mapped into userspace page tables
-	AnonPages uint64
+	AnonPages *uint64
 	// files which have been mapped, such as libraries
-	Mapped uint64
-	Shmem  uint64
+	Mapped *uint64
+	Shmem  *uint64
 	// in-kernel data structures cache
-	Slab uint64
+	Slab *uint64
 	// Part of Slab, that might be reclaimed, such as caches
-	SReclaimable uint64
+	SReclaimable *uint64
 	// Part of Slab, that cannot be reclaimed on memory pressure
-	SUnreclaim  uint64
-	KernelStack uint64
+	SUnreclaim  *uint64
+	KernelStack *uint64
 	// amount of memory dedicated to the lowest level of page
 	// tables.
-	PageTables uint64
+	PageTables *uint64
 	// NFS pages sent to the server, but not yet committed to
 	// stable storage
-	NFSUnstable uint64
+	NFSUnstable *uint64
 	// Memory used for block device "bounce buffers"
-	Bounce uint64
+	Bounce *uint64
 	// Memory used by FUSE for temporary writeback buffers
-	WritebackTmp uint64
+	WritebackTmp *uint64
 	// Based on the overcommit ratio ('vm.overcommit_ratio'),
 	// this is the total amount of memory currently available to
 	// be allocated on the system. This limit is only adhered to
@@ -105,7 +105,7 @@ type Meminfo struct {
 	// yield a CommitLimit of 7.3G.
 	// For more details, see the memory overcommit documentation
 	// in vm/overcommit-accounting.
-	CommitLimit uint64
+	CommitLimit *uint64
 	// The amount of memory presently allocated on the system.
 	// The committed memory is a sum of all of the memory which
 	// has been allocated by processes, even if it has not been
@@ -119,27 +119,27 @@ type Meminfo struct {
 	// This is useful if one needs to guarantee that processes will
 	// not fail due to lack of memory once that memory has been
 	// successfully allocated.
-	CommittedAS uint64
+	CommittedAS *uint64
 	// total size of vmalloc memory area
-	VmallocTotal uint64
+	VmallocTotal *uint64
 	// amount of vmalloc area which is used
-	VmallocUsed uint64
+	VmallocUsed *uint64
 	// largest contiguous block of vmalloc area which is free
-	VmallocChunk      uint64
-	HardwareCorrupted uint64
-	AnonHugePages     uint64
-	ShmemHugePages    uint64
-	ShmemPmdMapped    uint64
-	CmaTotal          uint64
-	CmaFree           uint64
-	HugePagesTotal    uint64
-	HugePagesFree     uint64
-	HugePagesRsvd     uint64
-	HugePagesSurp     uint64
-	Hugepagesize      uint64
-	DirectMap4k       uint64
-	DirectMap2M       uint64
-	DirectMap1G       uint64
+	VmallocChunk      *uint64
+	HardwareCorrupted *uint64
+	AnonHugePages     *uint64
+	ShmemHugePages    *uint64
+	ShmemPmdMapped    *uint64
+	CmaTotal          *uint64
+	CmaFree           *uint64
+	HugePagesTotal    *uint64
+	HugePagesFree     *uint64
+	HugePagesRsvd     *uint64
+	HugePagesSurp     *uint64
+	Hugepagesize      *uint64
+	DirectMap4k       *uint64
+	DirectMap2M       *uint64
+	DirectMap1G       *uint64
 }
 
 // Meminfo returns an information about current kernel/system memory statistics.
@@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
 	m, err := parseMemInfo(bytes.NewReader(b))
 	if err != nil {
-		return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
+		return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
 	}
 
 	return *m, nil
@@ -175,101 +175,101 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
 		switch fields[0] {
 		case "MemTotal:":
-			m.MemTotal = v
+			m.MemTotal = &v
 		case "MemFree:":
-			m.MemFree = v
+			m.MemFree = &v
 		case "MemAvailable:":
-			m.MemAvailable = v
+			m.MemAvailable = &v
 		case "Buffers:":
-			m.Buffers = v
+			m.Buffers = &v
 		case "Cached:":
-			m.Cached = v
+			m.Cached = &v
 		case "SwapCached:":
-			m.SwapCached = v
+			m.SwapCached = &v
 		case "Active:":
-			m.Active = v
+			m.Active = &v
 		case "Inactive:":
-			m.Inactive = v
+			m.Inactive = &v
 		case "Active(anon):":
-			m.ActiveAnon = v
+			m.ActiveAnon = &v
 		case "Inactive(anon):":
-			m.InactiveAnon = v
+			m.InactiveAnon = &v
 		case "Active(file):":
-			m.ActiveFile = v
+			m.ActiveFile = &v
 		case "Inactive(file):":
-			m.InactiveFile = v
+			m.InactiveFile = &v
 		case "Unevictable:":
-			m.Unevictable = v
+			m.Unevictable = &v
 		case "Mlocked:":
-			m.Mlocked = v
+			m.Mlocked = &v
 		case "SwapTotal:":
-			m.SwapTotal = v
+			m.SwapTotal = &v
 		case "SwapFree:":
-			m.SwapFree = v
+			m.SwapFree = &v
 		case "Dirty:":
-			m.Dirty = v
+			m.Dirty = &v
 		case "Writeback:":
-			m.Writeback = v
+			m.Writeback = &v
 		case "AnonPages:":
-			m.AnonPages = v
+			m.AnonPages = &v
 		case "Mapped:":
-			m.Mapped = v
+			m.Mapped = &v
 		case "Shmem:":
-			m.Shmem = v
+			m.Shmem = &v
 		case "Slab:":
-			m.Slab = v
+			m.Slab = &v
 		case "SReclaimable:":
-			m.SReclaimable = v
+			m.SReclaimable = &v
 		case "SUnreclaim:":
-			m.SUnreclaim = v
+			m.SUnreclaim = &v
 		case "KernelStack:":
-			m.KernelStack = v
+			m.KernelStack = &v
 		case "PageTables:":
-			m.PageTables = v
+			m.PageTables = &v
 		case "NFS_Unstable:":
-			m.NFSUnstable = v
+			m.NFSUnstable = &v
 		case "Bounce:":
-			m.Bounce = v
+			m.Bounce = &v
 		case "WritebackTmp:":
-			m.WritebackTmp = v
+			m.WritebackTmp = &v
 		case "CommitLimit:":
-			m.CommitLimit = v
+			m.CommitLimit = &v
 		case "Committed_AS:":
-			m.CommittedAS = v
+			m.CommittedAS = &v
 		case "VmallocTotal:":
-			m.VmallocTotal = v
+			m.VmallocTotal = &v
 		case "VmallocUsed:":
-			m.VmallocUsed = v
+			m.VmallocUsed = &v
 		case "VmallocChunk:":
-			m.VmallocChunk = v
+			m.VmallocChunk = &v
 		case "HardwareCorrupted:":
-			m.HardwareCorrupted = v
+			m.HardwareCorrupted = &v
 		case "AnonHugePages:":
-			m.AnonHugePages = v
+			m.AnonHugePages = &v
 		case "ShmemHugePages:":
-			m.ShmemHugePages = v
+			m.ShmemHugePages = &v
 		case "ShmemPmdMapped:":
-			m.ShmemPmdMapped = v
+			m.ShmemPmdMapped = &v
 		case "CmaTotal:":
-			m.CmaTotal = v
+			m.CmaTotal = &v
 		case "CmaFree:":
-			m.CmaFree = v
+			m.CmaFree = &v
 		case "HugePages_Total:":
-			m.HugePagesTotal = v
+			m.HugePagesTotal = &v
 		case "HugePages_Free:":
-			m.HugePagesFree = v
+			m.HugePagesFree = &v
 		case "HugePages_Rsvd:":
-			m.HugePagesRsvd = v
+			m.HugePagesRsvd = &v
 		case "HugePages_Surp:":
-			m.HugePagesSurp = v
+			m.HugePagesSurp = &v
 		case "Hugepagesize:":
-			m.Hugepagesize = v
+			m.Hugepagesize = &v
 		case "DirectMap4k:":
-			m.DirectMap4k = v
+			m.DirectMap4k = &v
 		case "DirectMap2M:":
-			m.DirectMap2M = v
+			m.DirectMap2M = &v
 		case "DirectMap1G:":
-			m.DirectMap1G = v
+			m.DirectMap1G = &v
 		}
 	}
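Changing the Meminfo fields from uint64 to *uint64 lets a caller tell an absent /proc/meminfo line apart from a genuine zero. A small usage sketch, assuming procfs.NewFS as the package's usual constructor (not shown in this diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}
	// A nil pointer means the line was absent from /proc/meminfo,
	// which a zero-valued uint64 could not express.
	if mi.MemAvailable != nil {
		fmt.Printf("MemAvailable: %d kB\n", *mi.MemAvailable)
	} else {
		fmt.Println("MemAvailable not reported by this kernel")
	}
}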

vendor/github.com/prometheus/procfs/mountinfo.go

@@ -77,7 +77,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
 	mountInfo := strings.Split(mountString, " ")
 	mountInfoLength := len(mountInfo)
-	if mountInfoLength < 11 {
+	if mountInfoLength < 10 {
 		return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
 	}
 
@@ -144,7 +144,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
 	return optionalFields, nil
 }
 
-// Parses the mount options, superblock options.
+// mountOptionsParser parses the mount options, superblock options.
 func mountOptionsParser(mountOptions string) map[string]string {
 	opts := make(map[string]string)
 	options := strings.Split(mountOptions, ",")
@@ -161,7 +161,7 @@ func mountOptionsParser(mountOptions string) map[string]string {
 	return opts
 }
 
-// Retrieves mountinfo information from `/proc/self/mountinfo`.
+// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
 func GetMounts() ([]*MountInfo, error) {
 	data, err := util.ReadFileNoStat("/proc/self/mountinfo")
 	if err != nil {
@@ -170,7 +170,7 @@ func GetMounts() ([]*MountInfo, error) {
 	return parseMountInfo(data)
 }
 
-// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
+// GetProcMounts retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
 func GetProcMounts(pid int) ([]*MountInfo, error) {
 	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
 	if err != nil {
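GetMounts and GetProcMounts are the exported entry points for this parser. A minimal usage sketch of the self-mount variant (field names MountPoint and FSType are standard MountInfo fields in this package):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts() // reads /proc/self/mountinfo
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Println(m.MountPoint, m.FSType)
	}
}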

vendor/github.com/prometheus/procfs/mountstats.go

@@ -186,6 +186,8 @@ type NFSOperationStats struct {
 	CumulativeTotalResponseMilliseconds uint64
 	// Duration from when a request was enqueued to when it was completely handled.
 	CumulativeTotalRequestMilliseconds uint64
+	// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
+	Errors uint64
 }
 
 // A NFSTransportStats contains statistics for the NFS mount RPC requests and
@@ -336,12 +338,12 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 		if len(ss) == 0 {
 			break
 		}
-		if len(ss) < 2 {
-			return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
-		}
 
 		switch ss[0] {
 		case fieldOpts:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+			}
 			if stats.Opts == nil {
 				stats.Opts = map[string]string{}
 			}
@@ -354,6 +356,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 				}
 			}
 		case fieldAge:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+			}
 			// Age integer is in seconds
 			d, err := time.ParseDuration(ss[1] + "s")
 			if err != nil {
@@ -362,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 			stats.Age = d
 		case fieldBytes:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+			}
 			bstats, err := parseNFSBytesStats(ss[1:])
 			if err != nil {
 				return nil, err
@@ -369,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 			stats.Bytes = *bstats
 		case fieldEvents:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+			}
 			estats, err := parseNFSEventsStats(ss[1:])
 			if err != nil {
 				return nil, err
@@ -494,8 +505,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
 // line is reached.
 func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 	const (
-		// Number of expected fields in each per-operation statistics set
-		numFields = 9
+		// Minimum number of expected fields in each per-operation statistics set
+		minFields = 9
 	)
 
 	var ops []NFSOperationStats
@@ -508,12 +519,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 			break
 		}
 
-		if len(ss) != numFields {
+		if len(ss) < minFields {
 			return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
 		}
 
 		// Skip string operation name for integers
-		ns := make([]uint64, 0, numFields-1)
+		ns := make([]uint64, 0, minFields-1)
 		for _, st := range ss[1:] {
 			n, err := strconv.ParseUint(st, 10, 64)
 			if err != nil {
@@ -523,7 +534,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 			ns = append(ns, n)
 		}
 
-		ops = append(ops, NFSOperationStats{
+		opStats := NFSOperationStats{
 			Operation:                           strings.TrimSuffix(ss[0], ":"),
 			Requests:                            ns[0],
 			Transmissions:                       ns[1],
@@ -533,7 +544,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 			CumulativeQueueMilliseconds:         ns[5],
 			CumulativeTotalResponseMilliseconds: ns[6],
 			CumulativeTotalRequestMilliseconds:  ns[7],
-		})
+		}
+
+		if len(ns) > 8 {
+			opStats.Errors = ns[8]
+		}
+
+		ops = append(ops, opStats)
 	}
 
 	return ops, s.Err()

vendor/github.com/prometheus/procfs/net_conntrackstat.go

@@ -38,7 +38,7 @@ type ConntrackStatEntry struct {
 	SearchRestart uint64
 }
 
-// Retrieves netfilter's conntrack statistics, split by CPU cores
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
 func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
 	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
 }
@@ -55,7 +55,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 	stat, err := parseConntrackStat(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
+		return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
 	}
 
 	return stat, nil
@@ -147,7 +147,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
 func parseConntrackStatField(field string) (uint64, error) {
 	val, err := strconv.ParseUint(field, 16, 64)
 	if err != nil {
-		return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
+		return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
 	}
 	return val, err
 }

vendor/github.com/prometheus/procfs/net_ip_socket.go generated vendored Normal file

@@ -0,0 +1,226 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
// readLimit is used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
// With e.g. 150 Byte per line and the maximum number of 65535,
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
readLimit = 4294967296 // Byte -> 4 GiB
)
// this contains generic data structures for both udp and tcp sockets
type (
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
NetIPSocket []*netIPSocketLine
// NetIPSocketSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetIPSocket it does not collect
// the parsed lines into a slice.
NetIPSocketSummary struct {
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
TxQueueLength uint64
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
RxQueueLength uint64
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
LocalAddr net.IP
LocalPort uint64
RemAddr net.IP
RemPort uint64
St uint64
TxQueue uint64
RxQueue uint64
UID uint64
Inode uint64
}
)
func newNetIPSocket(file string) (NetIPSocket, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocket NetIPSocket
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocket = append(netIPSocket, line)
}
if err := s.Err(); err != nil {
return nil, err
}
return netIPSocket, nil
}
// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
}
if err := s.Err(); err != nil {
return nil, err
}
return &netIPSocketSummary, nil
}
// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order.
func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
}
switch len(byteIP) {
case 4:
return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
case 16:
i := net.IP{
byteIP[3], byteIP[2], byteIP[1], byteIP[0],
byteIP[7], byteIP[6], byteIP[5], byteIP[4],
byteIP[11], byteIP[10], byteIP[9], byteIP[8],
byteIP[15], byteIP[14], byteIP[13], byteIP[12],
}
return i, nil
default:
return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 10 columns %q",
strings.Join(fields, " "),
)
}
var err error // parse error
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in socket line as it has a missing colon %q",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
}
return line, nil
}
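The byte-order convention described above parseIP is easiest to see with a worked example. The snippet below re-derives the IPv4 branch for illustration (parseIP itself is unexported, so this is a standalone sketch, not a call into the package):

package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

func main() {
	// /proc/net/tcp and friends store IPv4 addresses as little-endian hex:
	// "0100007F" is the bytes 01 00 00 7F, which reversed give 127.0.0.1.
	b, err := hex.DecodeString("0100007F")
	if err != nil {
		panic(err)
	}
	ip := net.IP{b[3], b[2], b[1], b[0]}
	fmt.Println(ip) // 127.0.0.1
}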

vendor/github.com/prometheus/procfs/net_protocols.go generated vendored Normal file

@@ -0,0 +1,180 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// NetProtocolStats stores the contents from /proc/net/protocols
type NetProtocolStats map[string]NetProtocolStatLine
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
// only care about the first six columns as the rest are not likely to change
// and only serve to provide a set of capabilities for each protocol.
type NetProtocolStatLine struct {
Name string // 0 The name of the protocol
Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
Sockets int64 // 2 Number of sockets in use by this protocol
Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure.
MaxHeader uint64 // 5 Protocol specific max header size
Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
Capabilities NetProtocolCapabilities
}
// NetProtocolCapabilities contains a list of capabilities for each protocol
type NetProtocolCapabilities struct {
Close bool // 8
Connect bool // 9
Disconnect bool // 10
Accept bool // 11
IoCtl bool // 12
Init bool // 13
Destroy bool // 14
Shutdown bool // 15
SetSockOpt bool // 16
GetSockOpt bool // 17
SendMsg bool // 18
RecvMsg bool // 19
SendPage bool // 20
Bind bool // 21
BacklogRcv bool // 22
Hash bool // 23
UnHash bool // 24
GetPort bool // 25
EnterMemoryPressure bool // 26
}
// NetProtocols reads stats from /proc/net/protocols and returns a map of
// PortocolStatLine entries. As of this writing no official Linux Documentation
// exists, however the source is fairly self-explanatory and the format seems
// stable since its introduction in 2.6.12-rc2
// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
func (fs FS) NetProtocols() (NetProtocolStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
if err != nil {
return NetProtocolStats{}, err
}
return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
}
func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
nps := NetProtocolStats{}
// Skip the header line
s.Scan()
for s.Scan() {
line, err := nps.parseLine(s.Text())
if err != nil {
return NetProtocolStats{}, err
}
nps[line.Name] = *line
}
return nps, nil
}
func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
var err error
const enabled = "yes"
const disabled = "no"
fields := strings.Fields(rawLine)
line.Name = fields[0]
line.Size, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
if err != nil {
return nil, err
}
line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
if err != nil {
return nil, err
}
if fields[4] == enabled {
line.Pressure = 1
} else if fields[4] == disabled {
line.Pressure = 0
} else {
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
if fields[6] == enabled {
line.Slab = true
} else if fields[6] == disabled {
line.Slab = false
} else {
return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
}
line.ModuleName = fields[7]
err = line.Capabilities.parseCapabilities(fields[8:])
if err != nil {
return nil, err
}
return line, nil
}
func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
// The capabilities are all bools so we can loop over to map them
capabilityFields := [...]*bool{
&pc.Close,
&pc.Connect,
&pc.Disconnect,
&pc.Accept,
&pc.IoCtl,
&pc.Init,
&pc.Destroy,
&pc.Shutdown,
&pc.SetSockOpt,
&pc.GetSockOpt,
&pc.SendMsg,
&pc.RecvMsg,
&pc.SendPage,
&pc.Bind,
&pc.BacklogRcv,
&pc.Hash,
&pc.UnHash,
&pc.GetPort,
&pc.EnterMemoryPressure,
}
for i := 0; i < len(capabilities); i++ {
if capabilities[i] == "y" {
*capabilityFields[i] = true
} else if capabilities[i] == "n" {
*capabilityFields[i] = false
} else {
return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
}
}
return nil
}
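A short usage sketch for the new reader, again assuming procfs.NewFS as the package constructor; field meanings are as documented in the struct above:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetProtocols()
	if err != nil {
		panic(err)
	}
	for name, line := range stats {
		// Sockets in use and 4KB pages allocated, per protocol.
		fmt.Printf("%s: %d sockets, %d pages\n", name, line.Sockets, line.Memory)
	}
}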

vendor/github.com/prometheus/procfs/net_sockstat.go

@@ -70,7 +70,7 @@ func readSockstat(name string) (*NetSockstat, error) {
 	stat, err := parseSockstat(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
+		return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
 	}
 
 	return stat, nil
@@ -90,7 +90,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
 		// The remaining fields are key/value pairs.
 		kvs, err := parseSockstatKVs(fields[1:])
 		if err != nil {
-			return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
+			return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
 		}
 
 		// The first field is the protocol. We must trim its colon suffix.

vendor/github.com/prometheus/procfs/net_softnet.go

@@ -51,7 +51,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
 	entries, err := parseSoftnet(bytes.NewReader(b))
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
+		return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
 	}
 
 	return entries, nil

vendor/github.com/prometheus/procfs/net_tcp.go generated vendored Normal file

@@ -0,0 +1,64 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
type (
// NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
NetTCP []*netIPSocketLine
// NetTCPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetTCP it does not collect
// the parsed lines into a slice.
NetTCPSummary NetIPSocketSummary
)
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
func newNetTCP(file string) (NetTCP, error) {
n, err := newNetIPSocket(file)
n1 := NetTCP(n)
return n1, err
}
func newNetTCPSummary(file string) (*NetTCPSummary, error) {
n, err := newNetIPSocketSummary(file)
if n == nil {
return nil, err
}
n1 := NetTCPSummary(*n)
return &n1, err
}
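The summary variants avoid materializing one struct per socket. A usage sketch, again assuming procfs.NewFS:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	sum, err := fs.NetTCPSummary() // one pass over /proc/net/tcp, no per-line slice
	if err != nil {
		panic(err)
	}
	fmt.Println(sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)
}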

vendor/github.com/prometheus/procfs/net_udp.go

@@ -13,58 +13,14 @@
 
 package procfs
 
-import (
-	"bufio"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"net"
-	"os"
-	"strconv"
-	"strings"
-)
-
-const (
-	// readLimit is used by io.LimitReader while reading the content of the
-	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
-	// as each line represents a single used socket.
-	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
-	// With e.g. 150 Byte per line and the maximum number of 65535,
-	// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
-	readLimit = 4294967296 // Byte -> 4 GiB
-)
-
 type (
 	// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
-	NetUDP []*netUDPLine
+	NetUDP []*netIPSocketLine
 
 	// NetUDPSummary provides already computed values like the total queue lengths or
 	// the total number of used sockets. In contrast to NetUDP it does not collect
 	// the parsed lines into a slice.
-	NetUDPSummary struct {
-		// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
-		TxQueueLength uint64
-		// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
-		RxQueueLength uint64
-		// UsedSockets shows the total number of parsed lines representing the
-		// number of used sockets.
-		UsedSockets uint64
-	}
-
-	// netUDPLine represents the fields parsed from a single line
-	// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
-	// For the proc file format details, see https://linux.die.net/man/5/proc.
-	netUDPLine struct {
-		Sl        uint64
-		LocalAddr net.IP
-		LocalPort uint64
-		RemAddr   net.IP
-		RemPort   uint64
-		St        uint64
-		TxQueue   uint64
-		RxQueue   uint64
-		UID       uint64
-	}
+	NetUDPSummary NetIPSocketSummary
 )
 
 // NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
@@ -93,137 +49,16 @@ func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
 // newNetUDP creates a new NetUDP{,6} from the contents of the given file.
 func newNetUDP(file string) (NetUDP, error) {
-	f, err := os.Open(file)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	netUDP := NetUDP{}
-
-	lr := io.LimitReader(f, readLimit)
-	s := bufio.NewScanner(lr)
-	s.Scan() // skip first line with headers
-	for s.Scan() {
-		fields := strings.Fields(s.Text())
-		line, err := parseNetUDPLine(fields)
-		if err != nil {
-			return nil, err
-		}
-		netUDP = append(netUDP, line)
-	}
-	if err := s.Err(); err != nil {
-		return nil, err
-	}
-	return netUDP, nil
+	n, err := newNetIPSocket(file)
+	n1 := NetUDP(n)
+	return n1, err
 }
 
 // newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
 func newNetUDPSummary(file string) (*NetUDPSummary, error) {
-	f, err := os.Open(file)
-	if err != nil {
+	n, err := newNetIPSocketSummary(file)
+	if n == nil {
 		return nil, err
 	}
-	defer f.Close()
-
-	netUDPSummary := &NetUDPSummary{}
-
-	lr := io.LimitReader(f, readLimit)
-	s := bufio.NewScanner(lr)
-	s.Scan() // skip first line with headers
-	for s.Scan() {
-		fields := strings.Fields(s.Text())
-		line, err := parseNetUDPLine(fields)
-		if err != nil {
-			return nil, err
-		}
-		netUDPSummary.TxQueueLength += line.TxQueue
-		netUDPSummary.RxQueueLength += line.RxQueue
-		netUDPSummary.UsedSockets++
-	}
-	if err := s.Err(); err != nil {
-		return nil, err
-	}
-	return netUDPSummary, nil
-}
-
-// parseNetUDPLine parses a single line, represented by a list of fields.
-func parseNetUDPLine(fields []string) (*netUDPLine, error) {
-	line := &netUDPLine{}
-	if len(fields) < 8 {
-		return nil, fmt.Errorf(
-			"cannot parse net udp socket line as it has less then 8 columns: %s",
-			strings.Join(fields, " "),
-		)
-	}
-	var err error // parse error
-
-	// sl
-	s := strings.Split(fields[0], ":")
-	if len(s) != 2 {
-		return nil, fmt.Errorf(
-			"cannot parse sl field in udp socket line: %s", fields[0])
-	}
-
-	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
-	}
-	// local_address
-	l := strings.Split(fields[1], ":")
-	if len(l) != 2 {
-		return nil, fmt.Errorf(
-			"cannot parse local_address field in udp socket line: %s", fields[1])
-	}
-	if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
-		return nil, fmt.Errorf(
-			"cannot parse local_address value in udp socket line: %s", err)
-	}
-	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
-		return nil, fmt.Errorf(
-			"cannot parse local_address port value in udp socket line: %s", err)
-	}
-
-	// remote_address
-	r := strings.Split(fields[2], ":")
-	if len(r) != 2 {
-		return nil, fmt.Errorf(
-			"cannot parse rem_address field in udp socket line: %s", fields[1])
-	}
-	if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
-		return nil, fmt.Errorf(
-			"cannot parse rem_address value in udp socket line: %s", err)
-	}
-	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
-		return nil, fmt.Errorf(
-			"cannot parse rem_address port value in udp socket line: %s", err)
-	}
-
-	// st
-	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
-		return nil, fmt.Errorf(
-			"cannot parse st value in udp socket line: %s", err)
-	}
-
-	// tx_queue and rx_queue
-	q := strings.Split(fields[4], ":")
-	if len(q) != 2 {
-		return nil, fmt.Errorf(
-			"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
-			fields[4],
-		)
-	}
-	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
-	}
-	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
-		return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
-	}
-
-	// uid
-	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
-		return nil, fmt.Errorf(
-			"cannot parse uid value in udp socket line: %s", err)
-	}
-
-	return line, nil
+	n1 := NetUDPSummary(*n)
+	return &n1, err
 }

vendor/github.com/prometheus/procfs/net_unix.go

@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
 		line := s.Text()
 		item, err := nu.parseLine(line, hasInode, minFields)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
+			return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
 		}
 
 		nu.Rows = append(nu.Rows, item)
 	}
 
 	if err := s.Err(); err != nil {
-		return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
+		return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
 	}
 
 	return &nu, nil
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
 	users, err := u.parseUsers(fields[1])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
+		return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
 	}
 
 	flags, err := u.parseFlags(fields[3])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
+		return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
 	}
 
 	typ, err := u.parseType(fields[4])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
+		return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
 	}
 
 	state, err := u.parseState(fields[5])
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
+		return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
 	}
 
 	var inode uint64
 	if hasInode {
 		inode, err = u.parseInode(fields[6])
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
+			return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
 		}
 	}

vendor/github.com/prometheus/procfs/netstat.go generated vendored Normal file

@@ -0,0 +1,68 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"os"
"path/filepath"
"strconv"
"strings"
)
// NetStat contains statistics for all the counters from one file
type NetStat struct {
Filename string
Stats map[string][]uint64
}
// NetStat retrieves stats from /proc/net/stat/
func (fs FS) NetStat() ([]NetStat, error) {
statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
if err != nil {
return nil, err
}
var netStatsTotal []NetStat
for _, filePath := range statFiles {
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
netStatFile := NetStat{
Filename: filepath.Base(filePath),
Stats: make(map[string][]uint64),
}
scanner := bufio.NewScanner(file)
scanner.Scan()
// First string is always a header for stats
var headers []string
headers = append(headers, strings.Fields(scanner.Text())...)
// Other strings represent per-CPU counters
for scanner.Scan() {
for num, counter := range strings.Fields(scanner.Text()) {
value, err := strconv.ParseUint(counter, 16, 32)
if err != nil {
return nil, err
}
netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
}
}
netStatsTotal = append(netStatsTotal, netStatFile)
}
return netStatsTotal, nil
}
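A usage sketch for the new NetStat reader, assuming procfs.NewFS; each counter name maps to one value per CPU:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	netStats, err := fs.NetStat() // one NetStat per file under /proc/net/stat/
	if err != nil {
		panic(err)
	}
	for _, ns := range netStats {
		// e.g. "arp_cache" or "nf_conntrack" with its per-CPU counter columns.
		fmt.Println(ns.Filename, len(ns.Stats))
	}
}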

vendor/github.com/prometheus/procfs/proc.go

@@ -105,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) {
 	names, err := d.Readdirnames(-1)
 	if err != nil {
-		return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+		return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
 	}
 
 	p := Procs{}
@@ -134,6 +134,27 @@ func (p Proc) CmdLine() ([]string, error) {
 	return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
 }
 
+// Wchan returns the wchan (wait channel) of a process.
+func (p Proc) Wchan() (string, error) {
+	f, err := os.Open(p.path("wchan"))
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return "", err
+	}
+
+	wchan := string(data)
+	if wchan == "" || wchan == "0" {
+		return "", nil
+	}
+
+	return wchan, nil
+}
+
 // Comm returns the command name of a process.
 func (p Proc) Comm() (string, error) {
 	data, err := util.ReadFileNoStat(p.path("comm"))
@@ -185,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
 	for i, n := range names {
 		fd, err := strconv.ParseInt(n, 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+			return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
 		}
 		fds[i] = uintptr(fd)
 	}
@@ -257,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
 	names, err := d.Readdirnames(-1)
 	if err != nil {
-		return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+		return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
 	}
 
 	return names, nil

vendor/github.com/prometheus/procfs/proc_cgroup.go generated vendored Normal file

@@ -0,0 +1,98 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
// in this hierarchy
//
// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
type Cgroup struct {
// HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
// hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
HierarchyID int
// Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
// Cgroups V2 this may be empty, as all active controllers use the same hierarchy
Controllers []string
// Path of this control group, relative to the mount point of the cgroupfs representing this specific
// hierarchy
Path string
}
// parseCgroupString parses each line of the /proc/[pid]/cgroup file
// Line format is hierarchyID:[controller1,controller2]:path
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
var err error
fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
}
cgroup := &Cgroup{
Path: fields[2],
Controllers: nil,
}
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID")
}
if fields[1] != "" {
ssNames := strings.Split(fields[1], ",")
cgroup.Controllers = append(cgroup.Controllers, ssNames...)
}
return cgroup, nil
}
// parseCgroups reads each line of the /proc/[pid]/cgroup file
func parseCgroups(data []byte) ([]Cgroup, error) {
var cgroups []Cgroup
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
mountString := scanner.Text()
parsedMounts, err := parseCgroupString(mountString)
if err != nil {
return nil, err
}
cgroups = append(cgroups, *parsedMounts)
}
err := scanner.Err()
return cgroups, err
}
// Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
// so the len of the returned struct is equal to the number of active hierarchies on this system
func (p Proc) Cgroups() ([]Cgroup, error) {
data, err := util.ReadFileNoStat(p.path("cgroup"))
if err != nil {
return nil, err
}
return parseCgroups(data)
}
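A usage sketch for the new Cgroups accessor, assuming procfs.Self as the package's usual way to obtain the calling process's Proc handle (not shown in this diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // the calling process
	if err != nil {
		panic(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		panic(err)
	}
	for _, cg := range cgroups {
		// On a pure cgroup-v2 host this prints a single "0 [] /..." entry;
		// on v1 there is one entry per mounted hierarchy.
		fmt.Println(cg.HierarchyID, cg.Controllers, cg.Path)
	}
}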

vendor/github.com/prometheus/procfs/proc_fdinfo.go

@@ -16,7 +16,7 @@ package procfs
 import (
 	"bufio"
 	"bytes"
-	"errors"
+	"fmt"
 	"regexp"
 
 	"github.com/prometheus/procfs/internal/util"
@@ -41,7 +41,7 @@ type ProcFDInfo struct {
 	Flags string
 	// Mount point ID
 	MntID string
-	// List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only)
+	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
 	InotifyInfos []InotifyInfo
 }
 
@@ -112,7 +112,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
 		}
 		return i, nil
 	}
-	return nil, errors.New("invalid inode entry: " + line)
+	return nil, fmt.Errorf("invalid inode entry: %q", line)
 }
 
 // ProcFDInfos represents a list of ProcFDInfo structs.

vendor/github.com/prometheus/procfs/proc_limits.go

@@ -26,55 +26,55 @@ import (
 // http://man7.org/linux/man-pages/man2/getrlimit.2.html.
 type ProcLimits struct {
 	// CPU time limit in seconds.
-	CPUTime int64
+	CPUTime uint64
 	// Maximum size of files that the process may create.
-	FileSize int64
+	FileSize uint64
 	// Maximum size of the process's data segment (initialized data,
 	// uninitialized data, and heap).
-	DataSize int64
+	DataSize uint64
 	// Maximum size of the process stack in bytes.
-	StackSize int64
+	StackSize uint64
 	// Maximum size of a core file.
-	CoreFileSize int64
+	CoreFileSize uint64
 	// Limit of the process's resident set in pages.
-	ResidentSet int64
+	ResidentSet uint64
 	// Maximum number of processes that can be created for the real user ID of
 	// the calling process.
-	Processes int64
+	Processes uint64
 	// Value one greater than the maximum file descriptor number that can be
 	// opened by this process.
-	OpenFiles int64
+	OpenFiles uint64
 	// Maximum number of bytes of memory that may be locked into RAM.
-	LockedMemory int64
+	LockedMemory uint64
 	// Maximum size of the process's virtual memory address space in bytes.
-	AddressSpace int64
+	AddressSpace uint64
 	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
 	// this process may establish.
-	FileLocks int64
+	FileLocks uint64
 	// Limit of signals that may be queued for the real user ID of the calling
 	// process.
-	PendingSignals int64
+	PendingSignals uint64
 	// Limit on the number of bytes that can be allocated for POSIX message
 	// queues for the real user ID of the calling process.
-	MsqqueueSize int64
+	MsqqueueSize uint64
 	// Limit of the nice priority set using setpriority(2) or nice(2).
-	NicePriority int64
+	NicePriority uint64
 	// Limit of the real-time priority set using sched_setscheduler(2) or
 	// sched_setparam(2).
-	RealtimePriority int64
+	RealtimePriority uint64
 	// Limit (in microseconds) on the amount of CPU time that a process
 	// scheduled under a real-time scheduling policy may consume without making
 	// a blocking system call.
-	RealtimeTimeout int64
+	RealtimeTimeout uint64
 }
 
 const (
-	limitsFields    = 3
+	limitsFields    = 4
 	limitsUnlimited = "unlimited"
 )
 
 var (
-	limitsDelimiter = regexp.MustCompile(" +")
+	limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
 )
 
 // NewLimits returns the current soft limits of the process.
@@ -96,46 +96,49 @@ func (p Proc) Limits() (ProcLimits, error) {
 		l = ProcLimits{}
 		s = bufio.NewScanner(f)
 	)
+
+	s.Scan() // Skip limits header
+
 	for s.Scan() {
-		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		//fields := limitsMatch.Split(s.Text(), limitsFields)
+		fields := limitsMatch.FindStringSubmatch(s.Text())
 		if len(fields) != limitsFields {
-			return ProcLimits{}, fmt.Errorf(
-				"couldn't parse %s line %s", f.Name(), s.Text())
+			return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
 		}
 
-		switch fields[0] {
+		switch fields[1] {
 		case "Max cpu time":
-			l.CPUTime, err = parseInt(fields[1])
+			l.CPUTime, err = parseUint(fields[2])
 		case "Max file size":
-			l.FileSize, err = parseInt(fields[1])
+			l.FileSize, err = parseUint(fields[2])
 		case "Max data size":
-			l.DataSize, err = parseInt(fields[1])
+			l.DataSize, err = parseUint(fields[2])
 		case "Max stack size":
-			l.StackSize, err = parseInt(fields[1])
+			l.StackSize, err = parseUint(fields[2])
 		case "Max core file size":
-			l.CoreFileSize, err = parseInt(fields[1])
+			l.CoreFileSize, err = parseUint(fields[2])
 		case "Max resident set":
-			l.ResidentSet, err = parseInt(fields[1])
+			l.ResidentSet, err = parseUint(fields[2])
 		case "Max processes":
-			l.Processes, err = parseInt(fields[1])
+			l.Processes, err = parseUint(fields[2])
 		case "Max open files":
-			l.OpenFiles, err = parseInt(fields[1])
+			l.OpenFiles, err = parseUint(fields[2])
 		case "Max locked memory":
-			l.LockedMemory, err = parseInt(fields[1])
+			l.LockedMemory, err = parseUint(fields[2])
 		case "Max address space":
-			l.AddressSpace, err = parseInt(fields[1])
+			l.AddressSpace, err = parseUint(fields[2])
 		case "Max file locks":
-			l.FileLocks, err = parseInt(fields[1])
+			l.FileLocks, err = parseUint(fields[2])
 		case "Max pending signals":
-			l.PendingSignals, err = parseInt(fields[1])
+			l.PendingSignals, err = parseUint(fields[2])
 		case "Max msgqueue size":
-			l.MsqqueueSize, err = parseInt(fields[1])
+			l.MsqqueueSize, err = parseUint(fields[2])
 		case "Max nice priority":
-			l.NicePriority, err = parseInt(fields[1])
+			l.NicePriority, err = parseUint(fields[2])
 		case "Max realtime priority":
-			l.RealtimePriority, err = parseInt(fields[1])
+			l.RealtimePriority, err = parseUint(fields[2])
 		case "Max realtime timeout":
-			l.RealtimeTimeout, err = parseInt(fields[1])
+			l.RealtimeTimeout, err = parseUint(fields[2])
 		}
 		if err != nil {
 			return ProcLimits{}, err
@@ -145,13 +148,13 @@ func (p Proc) Limits() (ProcLimits, error) {
return l, s.Err() return l, s.Err()
} }
func parseInt(s string) (int64, error) { func parseUint(s string) (uint64, error) {
if s == limitsUnlimited { if s == limitsUnlimited {
return -1, nil return 18446744073709551615, nil
} }
i, err := strconv.ParseInt(s, 10, 64) i, err := strconv.ParseUint(s, 10, 64)
if err != nil { if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
} }
return i, nil return i, nil
} }

View File

@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// +build !windows // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package procfs package procfs
@ -25,6 +25,7 @@ import (
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
type ProcMapPermissions struct { type ProcMapPermissions struct {
// mapping has the [R]ead flag set // mapping has the [R]ead flag set
Read bool Read bool
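The build-constraint change above replaces the broad !windows tag with an explicit list of supported Unix-like platforms. For reference, since Go 1.17 the same constraint can also be written in //go:build form; a sketch of the equivalent pair (space-separated terms on a +build line are ORed):

//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris

package procfs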

View File

@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1) names, err := d.Readdirnames(-1)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
} }
ns := make(Namespaces, len(names)) ns := make(Namespaces, len(names))
@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2) fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 { if len(fields) != 2 {
return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
} }
typ := fields[0] typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
} }
ns[name] = Namespace{typ, uint32(inode)} ns[name] = Namespace{typ, uint32(inode)}
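Each entry under /proc/[pid]/ns is a symlink whose target has the form type:[inode], e.g. net:[4026531992]; the loop above splits on the first colon and trims the brackets. A standalone sketch of that parsing step (hypothetical input; real targets come from os.Readlink):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	target := "net:[4026531992]" // example value only
	fields := strings.SplitN(target, ":", 2)
	if len(fields) != 2 {
		panic("unexpected namespace link format")
	}
	inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=%s inode=%d\n", fields[0], inode)
}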

View File

@ -59,7 +59,7 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil { if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
} }
return parsePSIStats(resource, bytes.NewReader(data)) return parsePSIStats(resource, bytes.NewReader(data))
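The wrapped error above now preserves the underlying cause, which matters because the pressure files only exist on kernels 4.20+ built with CONFIG_PSI. A hedged usage sketch (assuming the PSIStats fields Some/Full and Avg10 as exposed by this package version):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	stats, err := fs.PSIStatsForResource("cpu") // "cpu", "memory", or "io"
	if err != nil {
		panic(err) // e.g. kernel < 4.20 or CONFIG_PSI disabled
	}
	if stats.Some != nil {
		fmt.Println("cpu some avg10:", stats.Some.Avg10)
	}
}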

165
vendor/github.com/prometheus/procfs/proc_smaps.go generated vendored Normal file
View File

@ -0,0 +1,165 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"bufio"
"errors"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
var (
// match the header line before each mapped zone in /proc/pid/smaps
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
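// ProcSMapsRollup models memory usage summed over all mappings of a process,
// as read from /proc/[pid]/smaps_rollup (or aggregated from /proc/[pid]/smaps).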
type ProcSMapsRollup struct {
// Amount of the mapping that is currently resident in RAM
Rss uint64
// Process's proportional share of this mapping
Pss uint64
// Size in bytes of clean shared pages
SharedClean uint64
// Size in bytes of dirty shared pages
SharedDirty uint64
// Size in bytes of clean private pages
PrivateClean uint64
// Size in bytes of dirty private pages
PrivateDirty uint64
// Amount of memory currently marked as referenced or accessed
Referenced uint64
// Amount of memory that does not belong to any file
Anonymous uint64
// Amount of would-be-anonymous memory currently on swap
Swap uint64
// Process's proportional memory on swap
SwapPss uint64
}
// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
// process.
//
// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will
// be read and summed.
func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
if err != nil && os.IsNotExist(err) {
return p.procSMapsRollupManual()
}
if err != nil {
return ProcSMapsRollup{}, err
}
lines := strings.Split(string(data), "\n")
smaps := ProcSMapsRollup{}
// skip the first line, which doesn't contain information we need
lines = lines[1:]
for _, line := range lines {
if line == "" {
continue
}
if err := smaps.parseLine(line); err != nil {
return ProcSMapsRollup{}, err
}
}
return smaps, nil
}
// Read /proc/pid/smaps and do the roll-up in Go code.
func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
file, err := os.Open(p.path("smaps"))
if err != nil {
return ProcSMapsRollup{}, err
}
defer file.Close()
smaps := ProcSMapsRollup{}
scan := bufio.NewScanner(file)
for scan.Scan() {
line := scan.Text()
if procSMapsHeaderLine.MatchString(line) {
continue
}
if err := smaps.parseLine(line); err != nil {
return ProcSMapsRollup{}, err
}
}
return smaps, nil
}
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
return fmt.Errorf("invalid smaps line %q, missing colon", line)
}
k := kv[0]
if k == "VmFlags" {
return nil
}
v := strings.TrimSpace(kv[1])
v = strings.TrimRight(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return err
}
vBytes := vKBytes * 1024
s.addValue(k, v, vKBytes, vBytes)
return nil
}
func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
switch k {
case "Rss":
s.Rss += vUintBytes
case "Pss":
s.Pss += vUintBytes
case "Shared_Clean":
s.SharedClean += vUintBytes
case "Shared_Dirty":
s.SharedDirty += vUintBytes
case "Private_Clean":
s.PrivateClean += vUintBytes
case "Private_Dirty":
s.PrivateDirty += vUintBytes
case "Referenced":
s.Referenced += vUintBytes
case "Anonymous":
s.Anonymous += vUintBytes
case "Swap":
s.Swap += vUintBytes
case "SwapPss":
s.SwapPss += vUintBytes
}
}
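A short usage sketch of the new rollup API (not part of this diff; on kernels older than 4.15 it transparently falls back to summing /proc/[pid]/smaps, as implemented above):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		panic(err)
	}
	fmt.Printf("rss=%dB pss=%dB swap=%dB\n", rollup.Rss, rollup.Pss, rollup.Swap)
}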

View File

@ -100,6 +100,15 @@ type ProcStat struct {
VSize uint VSize uint
// Resident set size in pages. // Resident set size in pages.
RSS int RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
// Scheduling policy.
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
proc fs.FS proc fs.FS
} }
@ -119,7 +128,8 @@ func (p Proc) Stat() (ProcStat, error) {
} }
var ( var (
ignore int ignoreInt64 int64
ignoreUint64 uint64
s = ProcStat{PID: p.PID, proc: p.fs} s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("(")) l = bytes.Index(data, []byte("("))
@ -127,10 +137,7 @@ func (p Proc) Stat() (ProcStat, error) {
) )
if l < 0 || r < 0 { if l < 0 || r < 0 {
return ProcStat{}, fmt.Errorf( return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
"unexpected format, couldn't extract comm: %s",
data,
)
} }
s.Comm = string(data[l+1 : r]) s.Comm = string(data[l+1 : r])
@ -154,10 +161,28 @@ func (p Proc) Stat() (ProcStat, error) {
&s.Priority, &s.Priority,
&s.Nice, &s.Nice,
&s.NumThreads, &s.NumThreads,
&ignore, &ignoreInt64,
&s.Starttime, &s.Starttime,
&s.VSize, &s.VSize,
&s.RSS, &s.RSS,
&s.RSSLimit,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
&ignoreInt64,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
) )
if err != nil { if err != nil {
return ProcStat{}, err return ProcStat{}, err
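The extra fmt.Fscan targets above consume the stat columns that were previously left unread, which is what exposes rsslim, rt_priority, policy, and delayacct_blkio_ticks as struct fields. A small usage sketch (assumes a Linux host):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	stat, err := p.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Println("rss limit (bytes):", stat.RSSLimit)
	fmt.Println("rt priority:", stat.RTPriority, "policy:", stat.Policy)
	fmt.Println("blkio delay (ticks):", stat.DelayAcctBlkIOTicks)
}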

View File

@ -72,8 +72,10 @@ type ProcStatus struct {
// Number of involuntary context switches. // Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64 NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) // UIDs of the process (Real, effective, saved set, and filesystem UIDs)
UIDs [4]string UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
} }
// NewStatus returns the current status information of the process. // NewStatus returns the current status information of the process.
@ -119,6 +121,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.Name = vString s.Name = vString
case "Uid": case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t")) copy(s.UIDs[:], strings.Split(vString, "\t"))
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
case "VmPeak": case "VmPeak":
s.VmPeak = vUintBytes s.VmPeak = vUintBytes
case "VmSize": case "VmSize":

View File

@ -95,24 +95,27 @@ func (fs FS) Schedstat() (*Schedstat, error) {
return stats, nil return stats, nil
} }
func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { func parseProcSchedstat(contents string) (ProcSchedstat, error) {
var (
stats ProcSchedstat
err error
)
match := procLineRE.FindStringSubmatch(contents) match := procLineRE.FindStringSubmatch(contents)
if match != nil { if match != nil {
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
if err != nil { if err != nil {
return return stats, err
} }
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
if err != nil { if err != nil {
return return stats, err
} }
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
return return stats, err
} }
err = errors.New("could not parse schedstat") return stats, errors.New("could not parse schedstat")
return
} }
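The rewrite above drops the named results and bare returns in favor of explicit return statements, which makes each exit path visible where it occurs. A generic illustration of the difference (not from this package):

package main

import (
	"fmt"
	"strconv"
)

// With named results, a bare `return` hands back whatever the named
// variables happen to hold at that point, which is easy to misread.
func parseImplicit(s string) (n uint64, err error) {
	n, err = strconv.ParseUint(s, 10, 64)
	return // implicitly returns (n, err)
}

// Explicit returns express the same control flow without hidden state.
func parseExplicit(s string) (uint64, error) {
	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return 0, err
	}
	return n, nil
}

func main() {
	fmt.Println(parseImplicit("42"))
	fmt.Println(parseExplicit("42"))
}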

151
vendor/github.com/prometheus/procfs/slab.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
var (
slabSpace = regexp.MustCompile(`\s+`)
slabVer = regexp.MustCompile(`slabinfo -`)
slabHeader = regexp.MustCompile(`# name`)
)
// Slab represents a slab pool in the kernel.
type Slab struct {
Name string
ObjActive int64
ObjNum int64
ObjSize int64
ObjPerSlab int64
PagesPerSlab int64
// tunables
Limit int64
Batch int64
SharedFactor int64
SlabActive int64
SlabNum int64
SharedAvail int64
}
// SlabInfo represents info for all slabs.
type SlabInfo struct {
Slabs []*Slab
}
func shouldParseSlab(line string) bool {
if slabVer.MatchString(line) {
return false
}
if slabHeader.MatchString(line) {
return false
}
return true
}
// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
func parseV21SlabEntry(line string) (*Slab, error) {
// First cleanup whitespace.
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
return nil, fmt.Errorf("unable to parse: %q", line)
}
var err error
i := &Slab{Name: s[0]}
i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
if err != nil {
return nil, err
}
i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
if err != nil {
return nil, err
}
i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
if err != nil {
return nil, err
}
i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
if err != nil {
return nil, err
}
i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
if err != nil {
return nil, err
}
i.Limit, err = strconv.ParseInt(s[8], 10, 64)
if err != nil {
return nil, err
}
i.Batch, err = strconv.ParseInt(s[9], 10, 64)
if err != nil {
return nil, err
}
i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
if err != nil {
return nil, err
}
i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
if err != nil {
return nil, err
}
i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
if err != nil {
return nil, err
}
i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
if err != nil {
return nil, err
}
return i, nil
}
// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
scanner := bufio.NewScanner(r)
s := SlabInfo{Slabs: []*Slab{}}
for scanner.Scan() {
line := scanner.Text()
if !shouldParseSlab(line) {
continue
}
slab, err := parseV21SlabEntry(line)
if err != nil {
return s, err
}
s.Slabs = append(s.Slabs, slab)
}
return s, nil
}
// SlabInfo reads data from /proc/slabinfo
func (fs FS) SlabInfo() (SlabInfo, error) {
// TODO: Consider passing options to allow for parsing different
// slabinfo versions. However, slabinfo 2.1 has been stable since
// kernel 2.6.10.
data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
if err != nil {
return SlabInfo{}, err
}
return parseSlabInfo21(bytes.NewReader(data))
}
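Note that /proc/slabinfo is normally readable by root only, so a usage sketch would be expected to run privileged (example not part of this diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	info, err := fs.SlabInfo() // typically requires root
	if err != nil {
		panic(err)
	}
	for _, s := range info.Slabs {
		fmt.Printf("%s: %d/%d objects active, %d bytes each\n",
			s.Name, s.ObjActive, s.ObjNum, s.ObjSize)
	}
}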

View File

@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice) &cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
} }
if count == 0 { if count == 0 {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
} }
cpuStat.User /= userHZ cpuStat.User /= userHZ
@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil { if err != nil {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
} }
return cpuStat, cpuID, nil return cpuStat, cpuID, nil
@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu) &softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil { if err != nil {
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
} }
return softIRQStat, total, nil return softIRQStat, total, nil
@ -184,34 +184,34 @@ func (fs FS) Stat() (Stat, error) {
switch { switch {
case parts[0] == "btime": case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
} }
case parts[0] == "intr": case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
} }
numberedIRQs := parts[2:] numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs)) stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs { for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
} }
} }
case parts[0] == "ctxt": case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
} }
case parts[0] == "processes": case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
} }
case parts[0] == "procs_running": case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
} }
case parts[0] == "procs_blocked": case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
} }
case parts[0] == "softirq": case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line) softIRQStats, total, err := parseSoftIRQStat(line)
@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) {
} }
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err) return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
} }
return stat, nil return stat, nil
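All of the fmt.Errorf calls in this hunk move from %s to %w for the wrapped error (and to %q for quoted values), so callers can inspect the original cause. A minimal sketch of what %w enables (hypothetical caller, standard library only):

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	inner := os.ErrNotExist
	wrapped := fmt.Errorf("couldn't parse %q: %w", "/proc/stat", inner)
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: %w keeps the chain
}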

View File

@ -112,8 +112,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text()) fields := strings.Fields(s.Text())
if len(fields) != 2 { if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf( return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
"couldn't parse %s line %s", file.Name(), s.Text())
} }
name := fields[0] name := fields[0]

View File

@ -74,11 +74,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) { func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
} }
zoneinfo, err := parseZoneinfo(data) zoneinfo, err := parseZoneinfo(data)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
} }
return zoneinfo, nil return zoneinfo, nil
} }
@ -99,7 +99,6 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
continue continue
} }
if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
zoneinfoElement.Zone = ""
continue continue
} }
parts := strings.Fields(strings.TrimSpace(line)) parts := strings.Fields(strings.TrimSpace(line))

3
vendor/modules.txt vendored
View File

@ -185,7 +185,7 @@ github.com/prometheus/client_model/go
github.com/prometheus/common/expfmt github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model github.com/prometheus/common/model
# github.com/prometheus/procfs v0.7.3 => github.com/prometheus/procfs v0.0.11 # github.com/prometheus/procfs v0.7.3
github.com/prometheus/procfs github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util github.com/prometheus/procfs/internal/util
@ -364,4 +364,3 @@ gotest.tools/v3/skip
# github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible # github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible
# github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2 # github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
# github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.6.0 # github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.6.0
# github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.11