mirror of https://github.com/docker/cli.git
vendor: github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43
Changed `matcher.Matches(file)` to `matcher.MatchesOrParentMatches(file)` to
fix the following staticcheck warning:

    cli/command/image/build/context.go:95:9: SA1019: matcher.Matches is deprecated: This implementation is buggy (it only checks a single parent dir against the pattern) and will be removed soon. Use either MatchesOrParentMatches or MatchesUsingParentResults instead. (staticcheck)
            return matcher.Matches(file)
                   ^
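For context, a minimal sketch of what the replacement call does, assuming the
PatternMatcher API from the vendored github.com/docker/docker/pkg/fileutils
package (the pattern and path below are made-up examples):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/fileutils"
    )

    func main() {
        // .dockerignore-style pattern; "docs" should exclude everything under docs/.
        pm, err := fileutils.NewPatternMatcher([]string{"docs"})
        if err != nil {
            panic(err)
        }

        // MatchesOrParentMatches checks the file and every parent directory
        // against the patterns, so files nested below an ignored directory
        // match; the deprecated Matches only checked a single parent dir.
        matched, err := pm.MatchesOrParentMatches("docs/generated/api.md")
        fmt.Println(matched, err) // true <nil>
    }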
Also updated a test to match the JSON output now omitting empty RootFS.Type
fields (in practice, this field should never be empty and should always be
"layer"), and changed the test to use subtests to make it easier to find
which case is failing.
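The subtest pattern referred to here is the standard library's t.Run; a
generic sketch (the TestGreeting table and greet helper are hypothetical,
not part of this commit):

    package example

    import "testing"

    func greet(name string) string {
        if name == "" {
            return "hello"
        }
        return "hello " + name
    }

    func TestGreeting(t *testing.T) {
        testCases := []struct {
            name, in, want string
        }{
            {name: "empty", in: "", want: "hello"},
            {name: "named", in: "bob", want: "hello bob"},
        }
        for _, tc := range testCases {
            tc := tc // capture the range variable for the closure (pre-Go 1.22)
            t.Run(tc.name, func(t *testing.T) {
                // A failure is reported per case, e.g. "TestGreeting/named",
                // so the failing case is identified by name.
                if got := greet(tc.in); got != tc.want {
                    t.Errorf("greet(%q) = %q, want %q", tc.in, got, tc.want)
                }
            })
        }
    }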
full diff: 343665850e...83b51522df
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Parent: e90cb75152
Commit: dc9e069ff2
@@ -92,7 +92,7 @@ func filepathMatches(matcher *fileutils.PatternMatcher, file string) (bool, erro
 		// Don't let them exclude everything, kind of silly.
 		return false, nil
 	}
-	return matcher.Matches(file)
+	return matcher.MatchesOrParentMatches(file)
 }

 // DetectArchiveReader detects whether the input stream is an archive or a
@@ -75,6 +75,8 @@ func TestNewInspectCommandSuccess(t *testing.T) {
 		},
 	}
 	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
 			imageInspectInvocationCount = 0
 			cli := test.NewFakeCli(&fakeClient{imageInspectFunc: tc.imageInspectFunc})
 			cmd := newInspectCommand(cli)
@@ -84,5 +86,6 @@ func TestNewInspectCommandSuccess(t *testing.T) {
 			assert.NilError(t, err)
 			golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("inspect-command-success.%s.golden", tc.name))
 			assert.Check(t, is.Equal(imageInspectInvocationCount, tc.imageCount))
+		})
 	}
 }
@@ -19,9 +19,7 @@
         "Data": null,
         "Name": ""
     },
-    "RootFS": {
-        "Type": ""
-    },
+    "RootFS": {},
     "Metadata": {
         "LastTagTime": "0001-01-01T00:00:00Z"
     }
@@ -46,9 +44,7 @@
         "Data": null,
         "Name": ""
     },
-    "RootFS": {
-        "Type": ""
-    },
+    "RootFS": {},
    "Metadata": {
         "LastTagTime": "0001-01-01T00:00:00Z"
     }
@@ -19,9 +19,7 @@
         "Data": null,
         "Name": ""
     },
-    "RootFS": {
-        "Type": ""
-    },
+    "RootFS": {},
     "Metadata": {
         "LastTagTime": "0001-01-01T00:00:00Z"
     }
@@ -12,7 +12,7 @@ require (
 	github.com/coreos/etcd v3.3.27+incompatible // indirect
 	github.com/creack/pty v1.1.11
 	github.com/docker/distribution v2.8.1+incompatible
-	github.com/docker/docker v20.10.7+incompatible
+	github.com/docker/docker v20.10.7+incompatible // see "replace" for the actual version
 	github.com/docker/docker-credential-helpers v0.6.4
 	github.com/docker/go-connections v0.4.0
 	github.com/docker/go-units v0.4.0
@@ -23,9 +23,9 @@ require (
 	github.com/google/go-cmp v0.5.7
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/imdario/mergo v0.3.12
+	github.com/klauspost/compress v1.14.3 // indirect
 	github.com/mitchellh/mapstructure v1.3.2
 	github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a // master (v0.9.0-dev)
-	github.com/moby/sys/mount v0.3.0 // indirect
 	github.com/moby/sys/signal v0.7.0
 	github.com/moby/sys/symlink v0.2.0 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
@@ -49,7 +49,7 @@ require (
 )

 replace (
-	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210811141259-343665850e3a+incompatible // master (v21.xx-dev)
+	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible // master (v21.xx-dev)
 	github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
 	github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.6.0
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.11
vendor.sum
@@ -250,8 +250,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.3-0.20210811141259-343665850e3a+incompatible h1:u4VL5McCDGrakMyJTrk6IRPk1A4NtmfJt07ARS3DXs0=
-github.com/docker/docker v20.10.3-0.20210811141259-343665850e3a+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible h1:bL4hLpxukr5Ls3bzYrn3LCYIwML+XXCktZHaGBIN3og=
+github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
 github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
@@ -479,6 +479,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.14.3 h1:DQv1WP+iS4srNjibdnHtqu8JNWCDMluj5NzPnFJsnvk=
+github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -531,12 +533,8 @@ github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a h1:1KdH8CRFygJ8oj8
 github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a/go.mod h1:OieevFziOisPBM43fLKG+lPcVp9XW+BlUiws8VIpG6k=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
-github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0=
-github.com/moby/sys/mount v0.3.0/go.mod h1:U2Z3ur2rXPFrFmy4q6WMwWrBOAQGYtYTRVM8BIvzbwk=
 github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
-github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
 github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
 github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows

 package api // import "github.com/docker/docker/api"

(File diff suppressed because it is too large)
@@ -13,19 +13,26 @@ import (
 // CgroupnsMode represents the cgroup namespace mode of the container
 type CgroupnsMode string

+// cgroup namespace modes for containers
+const (
+	CgroupnsModeEmpty   CgroupnsMode = ""
+	CgroupnsModePrivate CgroupnsMode = "private"
+	CgroupnsModeHost    CgroupnsMode = "host"
+)
+
 // IsPrivate indicates whether the container uses its own private cgroup namespace
 func (c CgroupnsMode) IsPrivate() bool {
-	return c == "private"
+	return c == CgroupnsModePrivate
 }

 // IsHost indicates whether the container shares the host's cgroup namespace
 func (c CgroupnsMode) IsHost() bool {
-	return c == "host"
+	return c == CgroupnsModeHost
 }

 // IsEmpty indicates whether the container cgroup namespace mode is unset
 func (c CgroupnsMode) IsEmpty() bool {
-	return c == ""
+	return c == CgroupnsModeEmpty
 }

 // Valid indicates whether the cgroup namespace mode is valid
@@ -37,60 +44,69 @@ func (c CgroupnsMode) Valid() bool {
 // values are platform specific
 type Isolation string

+// Isolation modes for containers
+const (
+	IsolationEmpty   Isolation = ""        // IsolationEmpty is unspecified (same behavior as default)
+	IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon
+	IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode
+	IsolationHyperV  Isolation = "hyperv"  // IsolationHyperV is HyperV isolation mode
+)
+
 // IsDefault indicates the default isolation technology of a container. On Linux this
 // is the native driver. On Windows, this is a Windows Server Container.
 func (i Isolation) IsDefault() bool {
-	return strings.ToLower(string(i)) == "default" || string(i) == ""
+	// TODO consider making isolation-mode strict (case-sensitive)
+	v := Isolation(strings.ToLower(string(i)))
+	return v == IsolationDefault || v == IsolationEmpty
 }

 // IsHyperV indicates the use of a Hyper-V partition for isolation
 func (i Isolation) IsHyperV() bool {
-	return strings.ToLower(string(i)) == "hyperv"
+	// TODO consider making isolation-mode strict (case-sensitive)
+	return Isolation(strings.ToLower(string(i))) == IsolationHyperV
 }

 // IsProcess indicates the use of process isolation
 func (i Isolation) IsProcess() bool {
-	return strings.ToLower(string(i)) == "process"
+	// TODO consider making isolation-mode strict (case-sensitive)
+	return Isolation(strings.ToLower(string(i))) == IsolationProcess
 }

-const (
-	// IsolationEmpty is unspecified (same behavior as default)
-	IsolationEmpty = Isolation("")
-	// IsolationDefault is the default isolation mode on current daemon
-	IsolationDefault = Isolation("default")
-	// IsolationProcess is process isolation mode
-	IsolationProcess = Isolation("process")
-	// IsolationHyperV is HyperV isolation mode
-	IsolationHyperV = Isolation("hyperv")
-)
-
 // IpcMode represents the container ipc stack.
 type IpcMode string

+// IpcMode constants
+const (
+	IPCModeNone      IpcMode = "none"
+	IPCModeHost      IpcMode = "host"
+	IPCModeContainer IpcMode = "container"
+	IPCModePrivate   IpcMode = "private"
+	IPCModeShareable IpcMode = "shareable"
+)
+
 // IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
 func (n IpcMode) IsPrivate() bool {
-	return n == "private"
+	return n == IPCModePrivate
 }

 // IsHost indicates whether the container shares the host's ipc namespace.
 func (n IpcMode) IsHost() bool {
-	return n == "host"
+	return n == IPCModeHost
 }

 // IsShareable indicates whether the container's ipc namespace can be shared with another container.
 func (n IpcMode) IsShareable() bool {
-	return n == "shareable"
+	return n == IPCModeShareable
 }

 // IsContainer indicates whether the container uses another container's ipc namespace.
 func (n IpcMode) IsContainer() bool {
-	parts := strings.SplitN(string(n), ":", 2)
-	return len(parts) > 1 && parts[0] == "container"
+	return strings.HasPrefix(string(n), string(IPCModeContainer)+":")
 }

 // IsNone indicates whether container IpcMode is set to "none".
 func (n IpcMode) IsNone() bool {
-	return n == "none"
+	return n == IPCModeNone
 }

 // IsEmpty indicates whether container IpcMode is empty
@@ -105,9 +121,8 @@ func (n IpcMode) Valid() bool {
 // Container returns the name of the container ipc stack is going to be used.
 func (n IpcMode) Container() string {
-	parts := strings.SplitN(string(n), ":", 2)
-	if len(parts) > 1 && parts[0] == "container" {
-		return parts[1]
+	if n.IsContainer() {
+		return strings.TrimPrefix(string(n), string(IPCModeContainer)+":")
 	}
 	return ""
 }
@@ -326,7 +341,7 @@ type LogMode string
 // Available logging modes
 const (
-	LogModeUnset = ""
+	LogModeUnset    LogMode = ""
 	LogModeBlocking LogMode = "blocking"
 	LogModeNonBlock LogMode = "non-blocking"
 )
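The IpcMode refactor above is behavior-preserving; a small usage sketch
against the vendored github.com/docker/docker/api/types/container package
(the "web" container name is a made-up example):

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/container"
    )

    func main() {
        mode := container.IpcMode("container:web")
        fmt.Println(mode.IsContainer()) // true
        fmt.Println(mode.Container())   // "web" (text after the "container:" prefix)

        fmt.Println(container.IpcMode("private").IsPrivate()) // true
        fmt.Println(container.IpcMode("").IsEmpty())          // true
    }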
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows

 package container // import "github.com/docker/docker/api/types/container"
@@ -1,33 +1,26 @@
 package events // import "github.com/docker/docker/api/types/events"

+// Type is used for event-types.
+type Type = string
+
+// List of known event types.
 const (
-	// BuilderEventType is the event type that the builder generates
-	BuilderEventType = "builder"
-	// ContainerEventType is the event type that containers generate
-	ContainerEventType = "container"
-	// DaemonEventType is the event type that daemon generate
-	DaemonEventType = "daemon"
-	// ImageEventType is the event type that images generate
-	ImageEventType = "image"
-	// NetworkEventType is the event type that networks generate
-	NetworkEventType = "network"
-	// PluginEventType is the event type that plugins generate
-	PluginEventType = "plugin"
-	// VolumeEventType is the event type that volumes generate
-	VolumeEventType = "volume"
-	// ServiceEventType is the event type that services generate
-	ServiceEventType = "service"
-	// NodeEventType is the event type that nodes generate
-	NodeEventType = "node"
-	// SecretEventType is the event type that secrets generate
-	SecretEventType = "secret"
-	// ConfigEventType is the event type that configs generate
-	ConfigEventType = "config"
+	BuilderEventType   Type = "builder"   // BuilderEventType is the event type that the builder generates.
+	ConfigEventType    Type = "config"    // ConfigEventType is the event type that configs generate.
+	ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate.
+	DaemonEventType    Type = "daemon"    // DaemonEventType is the event type that daemon generate.
+	ImageEventType     Type = "image"     // ImageEventType is the event type that images generate.
+	NetworkEventType   Type = "network"   // NetworkEventType is the event type that networks generate.
+	NodeEventType      Type = "node"      // NodeEventType is the event type that nodes generate.
+	PluginEventType    Type = "plugin"    // PluginEventType is the event type that plugins generate.
+	SecretEventType    Type = "secret"    // SecretEventType is the event type that secrets generate.
+	ServiceEventType   Type = "service"   // ServiceEventType is the event type that services generate.
+	VolumeEventType    Type = "volume"    // VolumeEventType is the event type that volumes generate.
 )

 // Actor describes something that generates events,
 // like a container, or a network, or a volume.
-// It has a defined name and a set or attributes.
+// It has a defined name and a set of attributes.
 // The container attributes are its labels, other actors
 // can generate these attributes from other properties.
 type Actor struct {
@@ -39,11 +32,11 @@ type Actor struct {
 type Message struct {
 	// Deprecated information from JSONMessage.
 	// With data only in container events.
-	Status string `json:"status,omitempty"`
-	ID     string `json:"id,omitempty"`
-	From   string `json:"from,omitempty"`
+	Status string `json:"status,omitempty"` // Deprecated: use Action instead.
+	ID     string `json:"id,omitempty"`     // Deprecated: use Actor.ID instead.
+	From   string `json:"from,omitempty"`   // Deprecated: use Actor.Attributes["image"] instead.

-	Type   string
+	Type   Type
 	Action string
 	Actor  Actor
 	// Engine events are local scope. Cluster events are swarm scope.
@@ -3,15 +3,21 @@ package types
 // This file was generated by the swagger tool.
 // Editing this file might prove futile when you re-run the swagger generate command

-// GraphDriverData Information about a container's graph driver.
+// GraphDriverData Information about the storage driver used to store the container's and
+// image's filesystem.
+//
 // swagger:model GraphDriverData
 type GraphDriverData struct {

-	// data
+	// Low-level storage metadata, provided as key/value pairs.
+	//
+	// This information is driver-specific, and depends on the storage-driver
+	// in use, and should be used for informational purposes only.
+	//
 	// Required: true
 	Data map[string]string `json:"Data"`

-	// name
+	// Name of the storage driver.
 	// Required: true
 	Name string `json:"Name"`
 }
@@ -19,38 +19,119 @@ import (
 // RootFS returns Image's RootFS description including the layer IDs.
 type RootFS struct {
-	Type      string
+	Type   string   `json:",omitempty"`
 	Layers []string `json:",omitempty"`
-	BaseLayer string `json:",omitempty"`
 }

 // ImageInspect contains response of Engine API:
 // GET "/images/{name:.*}/json"
 type ImageInspect struct {
+	// ID is the content-addressable ID of an image.
+	//
+	// This identifier is a content-addressable digest calculated from the
+	// image's configuration (which includes the digests of layers used by
+	// the image).
+	//
+	// Note that this digest differs from the `RepoDigests` below, which
+	// holds digests of image manifests that reference the image.
 	ID string `json:"Id"`
+
+	// RepoTags is a list of image names/tags in the local image cache that
+	// reference this image.
+	//
+	// Multiple image tags can refer to the same image, and this list may be
+	// empty if no tags reference the image, in which case the image is
+	// "untagged", but it can still be referenced by its ID.
 	RepoTags []string
+
+	// RepoDigests is a list of content-addressable digests of locally available
+	// image manifests that the image is referenced from. Multiple manifests can
+	// refer to the same image.
+	//
+	// These digests are usually only available if the image was either pulled
+	// from a registry, or if the image was pushed to a registry, which is when
+	// the manifest is generated and its digest calculated.
 	RepoDigests []string
+
+	// Parent is the ID of the parent image.
+	//
+	// Depending on how the image was created, this field may be empty and
+	// is only set for images that were built/created locally. This field
+	// is empty if the image was pulled from an image registry.
 	Parent string
+
+	// Comment is an optional message that can be set when committing or
+	// importing the image.
 	Comment string
+
+	// Created is the date and time at which the image was created, formatted in
+	// RFC 3339 nano-seconds (time.RFC3339Nano).
 	Created string
+
+	// Container is the ID of the container that was used to create the image.
+	//
+	// Depending on how the image was created, this field may be empty.
 	Container string
+
+	// ContainerConfig is the configuration of the container that was committed
+	// into the image.
 	ContainerConfig *container.Config
+
+	// DockerVersion is the version of Docker that was used to build the image.
+	//
+	// Depending on how the image was created, this field may be empty.
 	DockerVersion string
+
+	// Author is the name of the author that was specified when committing the
+	// image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
 	Author string
 	Config *container.Config
+
+	// Architecture is the hardware CPU architecture that the image runs on.
 	Architecture string
+
+	// Variant is the CPU architecture variant (presently ARM-only).
 	Variant string `json:",omitempty"`
+
+	// OS is the Operating System the image is built to run on.
 	Os string
+
+	// OsVersion is the version of the Operating System the image is built to
+	// run on (especially for Windows).
 	OsVersion string `json:",omitempty"`
+
+	// Size is the total size of the image including all layers it is composed of.
 	Size int64
-	VirtualSize int64
+
+	// VirtualSize is the total size of the image including all layers it is
+	// composed of.
+	//
+	// In versions of Docker before v1.10, this field was calculated from
+	// the image itself and all of its parent images. Docker v1.10 and up
+	// store images self-contained, and no longer use a parent-chain, making
+	// this field an equivalent of the Size field.
+	//
+	// This field is kept for backward compatibility, but may be removed in
+	// a future version of the API.
+	VirtualSize int64 // TODO(thaJeztah): deprecate this field
+
+	// GraphDriver holds information about the storage driver used to store the
+	// container's and image's filesystem.
 	GraphDriver GraphDriverData
+
+	// RootFS contains information about the image's RootFS, including the
+	// layer IDs.
 	RootFS RootFS
+
+	// Metadata of the image in the local cache.
+	//
+	// This information is local to the daemon, and not part of the image itself.
 	Metadata ImageMetadata
 }

 // ImageMetadata contains engine-local data about the image
 type ImageMetadata struct {
+	// LastTagTime is the date and time at which the image was last tagged.
 	LastTagTime time.Time `json:",omitempty"`
 }
@@ -421,13 +502,44 @@ type DefaultNetworkSettings struct {
 // MountPoint represents a mount point configuration inside the container.
 // This is used for reporting the mountpoints in use by a container.
 type MountPoint struct {
+	// Type is the type of mount, see `Type<foo>` definitions in
+	// github.com/docker/docker/api/types/mount.Type
 	Type mount.Type `json:",omitempty"`
+
+	// Name is the name reference to the underlying data defined by `Source`
+	// e.g., the volume name.
 	Name string `json:",omitempty"`
+
+	// Source is the source location of the mount.
+	//
+	// For volumes, this contains the storage location of the volume (within
+	// `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
+	// the source (host) part of the bind-mount. For `tmpfs` mount points, this
+	// field is empty.
 	Source string
+
+	// Destination is the path relative to the container root (`/`) where the
+	// Source is mounted inside the container.
 	Destination string
+
+	// Driver is the volume driver used to create the volume (if it is a volume).
 	Driver string `json:",omitempty"`
+
+	// Mode is a comma separated list of options supplied by the user when
+	// creating the bind/volume mount.
+	//
+	// The default is platform-specific (`"z"` on Linux, empty on Windows).
 	Mode string
+
+	// RW indicates whether the mount is mounted writable (read-write).
 	RW bool
+
+	// Propagation describes how mounts are propagated from the host into the
+	// mount point, and vice-versa. Refer to the Linux kernel documentation
+	// for details:
+	// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
+	//
+	// This field is not used on Windows.
 	Propagation mount.Propagation
 }
@@ -8,6 +8,9 @@ import (
 // compare compares two version strings
 // returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
 func compare(v1, v2 string) int {
+	if v1 == v2 {
+		return 0
+	}
 	var (
 		currTab  = strings.Split(v1, ".")
 		otherTab = strings.Split(v2, ".")
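The compare function above is unexported; it backs the exported helpers of
the versions package, whose behavior is unchanged by the new fast path for
identical strings. A usage sketch, assuming the vendored
github.com/docker/docker/api/types/versions package:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/versions"
    )

    func main() {
        // These helpers call the unexported compare(); equal inputs now
        // return immediately without splitting on ".".
        fmt.Println(versions.LessThan("1.24", "1.25"))    // true
        fmt.Println(versions.Equal("1.25", "1.25"))       // true
        fmt.Println(versions.GreaterThan("1.25", "1.24")) // true
    }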
@@ -1,7 +1,6 @@
 package git // import "github.com/docker/docker/builder/remotecontext/git"

 import (
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -34,7 +33,7 @@ func Clone(remoteURL string) (string, error) {
 func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) {
 	fetch := fetchArgs(repo.remote, repo.ref)

-	root, err := ioutil.TempDir("", "docker-build-git")
+	root, err := os.MkdirTemp("", "docker-build-git")
 	if err != nil {
 		return "", err
 	}
@@ -135,9 +135,6 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) {
 		}
 	}

-	if _, ok := c.client.Transport.(http.RoundTripper); !ok {
-		return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport)
-	}
 	if c.scheme == "" {
 		c.scheme = "http"

@@ -281,21 +278,6 @@ func ParseHostURL(host string) (*url.URL, error) {
 	}, nil
 }

-// CustomHTTPHeaders returns the custom http headers stored by the client.
-func (cli *Client) CustomHTTPHeaders() map[string]string {
-	m := make(map[string]string)
-	for k, v := range cli.customHTTPHeaders {
-		m[k] = v
-	}
-	return m
-}
-
-// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
-// Deprecated: use WithHTTPHeaders when creating the client.
-func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
-	cli.customHTTPHeaders = headers
-}
-
 // Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection.
 // Used by `docker dial-stdio` (docker/cli#889).
 func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
@@ -1,3 +1,4 @@
+//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly
 // +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly

 package client // import "github.com/docker/docker/client"
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"

 	"github.com/docker/docker/api/types/swarm"
 )
@@ -23,7 +23,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
 		return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
 	}

-	body, err := ioutil.ReadAll(resp.body)
+	body, err := io.ReadAll(resp.body)
 	if err != nil {
 		return swarm.Config{}, nil, err
 	}
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"
 	"net/url"

 	"github.com/docker/docker/api/types"
@@ -41,7 +41,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri
 		return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
 	}

-	body, err := ioutil.ReadAll(serverResp.body)
+	body, err := io.ReadAll(serverResp.body)
 	if err != nil {
 		return types.ContainerJSON{}, nil, err
 	}
@@ -33,7 +33,9 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit
 	errC := make(chan error, 1)

 	query := url.Values{}
+	if condition != "" {
 		query.Set("condition", string(condition))
+	}

 	resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
 	if err != nil {
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"

 	"github.com/docker/docker/api/types"
 )
@@ -20,7 +20,7 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ
 		return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
 	}

-	body, err := ioutil.ReadAll(serverResp.body)
+	body, err := io.ReadAll(serverResp.body)
 	if err != nil {
 		return types.ImageInspect{}, nil, err
 	}
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"
 	"net/url"

 	"github.com/docker/docker/api/types"
@@ -39,7 +39,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string,
 		return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
 	}

-	body, err := ioutil.ReadAll(resp.body)
+	body, err := io.ReadAll(resp.body)
 	if err != nil {
 		return networkResource, nil, err
 	}
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"

 	"github.com/docker/docker/api/types/swarm"
 )
@@ -20,7 +20,7 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm
 		return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
 	}

-	body, err := ioutil.ReadAll(serverResp.body)
+	body, err := io.ReadAll(serverResp.body)
 	if err != nil {
 		return swarm.Node{}, nil, err
 	}
@@ -24,32 +24,13 @@ type Opt func(*Client) error
 // DOCKER_CERT_PATH to load the TLS certificates from.
 // DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
 func FromEnv(c *Client) error {
-	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
-		options := tlsconfig.Options{
-			CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
-			CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
-			KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
-			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
-		}
-		tlsc, err := tlsconfig.Client(options)
-		if err != nil {
-			return err
-		}
-
-		c.client = &http.Client{
-			Transport:     &http.Transport{TLSClientConfig: tlsc},
-			CheckRedirect: CheckRedirect,
-		}
-	}
-
-	if host := os.Getenv("DOCKER_HOST"); host != "" {
-		if err := WithHost(host)(c); err != nil {
-			return err
-		}
-	}
-
-	if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
-		if err := WithVersion(version)(c); err != nil {
+	ops := []Opt{
+		WithTLSClientConfigFromEnv(),
+		WithHostFromEnv(),
+		WithVersionFromEnv(),
+	}
+	for _, op := range ops {
+		if err := op(c); err != nil {
 			return err
 		}
 	}
@@ -93,6 +74,18 @@ func WithHost(host string) Opt {
 	}
 }

+// WithHostFromEnv overrides the client host with the host specified in the
+// DOCKER_HOST environment variable. If DOCKER_HOST is not set, the host is
+// not modified.
+func WithHostFromEnv() Opt {
+	return func(c *Client) error {
+		if host := os.Getenv("DOCKER_HOST"); host != "" {
+			return WithHost(host)(c)
+		}
+		return nil
+	}
+}
+
 // WithHTTPClient overrides the client http client with the specified one
 func WithHTTPClient(client *http.Client) Opt {
 	return func(c *Client) error {
@@ -148,6 +141,38 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
 	}
 }

+// WithTLSClientConfigFromEnv configures the client's TLS settings with the
+// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables.
+// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified.
+//
+// Supported environment variables:
+// DOCKER_CERT_PATH directory to load the TLS certificates (ca.pem, cert.pem, key.pem) from.
+// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
+func WithTLSClientConfigFromEnv() Opt {
+	return func(c *Client) error {
+		dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
+		if dockerCertPath == "" {
+			return nil
+		}
+		options := tlsconfig.Options{
+			CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
+			CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
+			KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
+			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+		}
+		tlsc, err := tlsconfig.Client(options)
+		if err != nil {
+			return err
+		}
+
+		c.client = &http.Client{
+			Transport:     &http.Transport{TLSClientConfig: tlsc},
+			CheckRedirect: CheckRedirect,
+		}
+		return nil
+	}
+}
+
 // WithVersion overrides the client version with the specified one. If an empty
 // version is specified, the value will be ignored to allow version negotiation.
 func WithVersion(version string) Opt {
@@ -160,6 +185,18 @@ func WithVersion(version string) Opt {
 	}
 }

+// WithVersionFromEnv overrides the client version with the version specified in
+// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set,
+// the version is not modified.
+func WithVersionFromEnv() Opt {
+	return func(c *Client) error {
+		if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
+			return WithVersion(version)(c)
+		}
+		return nil
+	}
+}
+
 // WithAPIVersionNegotiation enables automatic API version negotiation for the client.
 // With this option enabled, the client automatically negotiates the API version
 // to use when making requests. API version negotiation is performed on the first
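After this refactor, FromEnv is simply the three granular *FromEnv options
applied in order, and each can also be used on its own. A usage sketch,
assuming the vendored github.com/docker/docker/client package:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        // FromEnv applies WithTLSClientConfigFromEnv, WithHostFromEnv, and
        // WithVersionFromEnv in order.
        cli, err := client.NewClientWithOpts(
            client.FromEnv,
            client.WithAPIVersionNegotiation(),
        )
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ping, err := cli.Ping(context.Background())
        fmt.Println(ping.APIVersion, err)
    }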
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"

 	"github.com/docker/docker/api/types"
 )
@@ -20,7 +20,7 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type
 		return nil, nil, wrapResponseError(err, resp, "plugin", name)
 	}

-	body, err := ioutil.ReadAll(resp.body)
+	body, err := io.ReadAll(resp.body)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
@@ -155,12 +154,10 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
 		if err.Timeout() {
 			return serverResp, ErrorConnectionFailed(cli.host)
 		}
-		if !err.Temporary() {
 		if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
 			return serverResp, ErrorConnectionFailed(cli.host)
 		}
 	}
-		}

 		// Although there's not a strongly typed error for this in go-winio,
 		// lots of people are using the default configuration for the docker
@@ -206,7 +203,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error {
 			R: serverResp.body,
 			N: int64(bodyMax),
 		}
-		body, err = ioutil.ReadAll(bodyR)
+		body, err = io.ReadAll(bodyR)
 		if err != nil {
 			return err
 		}
@@ -241,14 +238,14 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request
 	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
 	// then the user can't change OUR headers
 	for k, v := range cli.customHTTPHeaders {
-		if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
+		if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" {
 			continue
 		}
 		req.Header.Set(k, v)
 	}

 	for k, v := range headers {
-		req.Header[k] = v
+		req.Header[http.CanonicalHeaderKey(k)] = v
 	}
 	return req
 }
@@ -266,7 +263,7 @@ func encodeData(data interface{}) (*bytes.Buffer, error) {
 func ensureReaderClosed(response serverResponse) {
 	if response.body != nil {
 		// Drain up to 512 bytes and close the body to let the Transport reuse the connection
-		io.CopyN(ioutil.Discard, response.body, 512)
+		io.CopyN(io.Discard, response.body, 512)
 		response.body.Close()
 	}
 }
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"

 	"github.com/docker/docker/api/types/swarm"
 )
@@ -23,7 +23,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S
 		return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
 	}

-	body, err := ioutil.ReadAll(resp.body)
+	body, err := io.ReadAll(resp.body)
 	if err != nil {
 		return swarm.Secret{}, nil, err
 	}
@@ -9,7 +9,7 @@ import (
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
-	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
@@ -5,7 +5,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/url"

 	"github.com/docker/docker/api/types"
@@ -25,7 +25,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string,
 		return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
 	}

-	body, err := ioutil.ReadAll(serverResp.body)
+	body, err := io.ReadAll(serverResp.body)
 	if err != nil {
 		return swarm.Service{}, nil, err
 	}
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"

 	"github.com/docker/docker/api/types/swarm"
 )
@@ -20,7 +20,7 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm
 		return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
 	}

-	body, err := ioutil.ReadAll(serverResp.body)
+	body, err := io.ReadAll(serverResp.body)
 	if err != nil {
 		return swarm.Task{}, nil, err
 	}
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"io/ioutil"
+	"io"

 	"github.com/docker/docker/api/types"
 )
@@ -28,7 +28,7 @@ func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (t
 		return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
 	}

-	body, err := ioutil.ReadAll(resp.body)
+	body, err := io.ReadAll(resp.body)
 	if err != nil {
 		return volume, nil, err
 	}
@@ -100,10 +100,10 @@ func FromStatusCode(err error, statusCode int) error {
 			err = System(err)
 		}
 	default:
-		logrus.WithFields(logrus.Fields{
+		logrus.WithError(err).WithFields(logrus.Fields{
 			"module":      "api",
-			"status_code": fmt.Sprintf("%d", statusCode),
-		}).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode)
+			"status_code": statusCode,
+		}).Debug("FIXME: Got an status-code for which error does not match any expected type!!!")
 
 	switch {
 	case statusCode >= 200 && statusCode < 400:

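The hunk above attaches the error and the numeric status code as structured fields instead of formatting them into the message. A small sketch of the same logrus pattern (field values are illustrative):

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	err := errors.New("unexpected response")
	// WithError attaches the error as a structured field, and the integer
	// status code stays machine-readable instead of being stringified.
	logrus.WithError(err).WithFields(logrus.Fields{
		"module":      "api",
		"status_code": 503,
	}).Debug("status code did not match any expected error type")
}
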
@@ -7,9 +7,9 @@ import (
 	"compress/bzip2"
 	"compress/gzip"
 	"context"
+	"encoding/binary"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -23,6 +23,7 @@ import (
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/system"
+	"github.com/klauspost/compress/zstd"
 	"github.com/sirupsen/logrus"
 	exec "golang.org/x/sys/execabs"
 )
@@ -84,6 +85,8 @@ const (
 	Gzip
 	// Xz is xz compression algorithm.
 	Xz
+	// Zstd is zstd compression algorithm.
+	Zstd
 )
 
 const (
@@ -122,14 +125,59 @@ func IsArchivePath(path string) bool {
 	return err == nil
 }
 
+const (
+	zstdMagicSkippableStart = 0x184D2A50
+	zstdMagicSkippableMask  = 0xFFFFFFF0
+)
+
+var (
+	bzip2Magic = []byte{0x42, 0x5A, 0x68}
+	gzipMagic  = []byte{0x1F, 0x8B, 0x08}
+	xzMagic    = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
+	zstdMagic  = []byte{0x28, 0xb5, 0x2f, 0xfd}
+)
+
+type matcher = func([]byte) bool
+
+func magicNumberMatcher(m []byte) matcher {
+	return func(source []byte) bool {
+		return bytes.HasPrefix(source, m)
+	}
+}
+
+// zstdMatcher detects zstd compression algorithm.
+// Zstandard compressed data is made of one or more frames.
+// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
+// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
+func zstdMatcher() matcher {
+	return func(source []byte) bool {
+		if bytes.HasPrefix(source, zstdMagic) {
+			// Zstandard frame
+			return true
+		}
+		// skippable frame
+		if len(source) < 8 {
+			return false
+		}
+		// magic number from 0x184D2A50 to 0x184D2A5F.
+		if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
+			return true
+		}
+		return false
+	}
+}
+
 // DetectCompression detects the compression algorithm of the source.
 func DetectCompression(source []byte) Compression {
-	for compression, m := range map[Compression][]byte{
-		Bzip2: {0x42, 0x5A, 0x68},
-		Gzip:  {0x1F, 0x8B, 0x08},
-		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
-	} {
-		if bytes.HasPrefix(source, m) {
+	compressionMap := map[Compression]matcher{
+		Bzip2: magicNumberMatcher(bzip2Magic),
+		Gzip:  magicNumberMatcher(gzipMagic),
+		Xz:    magicNumberMatcher(xzMagic),
+		Zstd:  zstdMatcher(),
+	}
+	for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} {
+		fn := compressionMap[compression]
+		if fn(source) {
 			return compression
 		}
 	}
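The zstd detection works because a Zstandard stream is a sequence of frames: normal frames start with the fixed magic 0x28 0xB5 0x2F 0xFD, while skippable frames use the 16 magic numbers 0x184D2A50 through 0x184D2A5F, which the mask folds into a single comparison. A standalone sketch of the same check (sample byte slices are illustrative):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// isZstd reports whether data starts with a Zstandard frame or a
// skippable frame, mirroring the detection logic in the hunk above.
func isZstd(data []byte) bool {
	zstdMagic := []byte{0x28, 0xb5, 0x2f, 0xfd}
	if bytes.HasPrefix(data, zstdMagic) {
		return true // standard Zstandard frame
	}
	if len(data) < 8 {
		return false
	}
	// Skippable frames use magic numbers 0x184D2A50..0x184D2A5F.
	magic := binary.LittleEndian.Uint32(data[:4])
	return magic&0xFFFFFFF0 == 0x184D2A50
}

func main() {
	frame := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00, 0x00, 0x00}
	skippable := []byte{0x50, 0x2A, 0x4D, 0x18, 0x00, 0x00, 0x00, 0x00}
	fmt.Println(isZstd(frame), isZstd(skippable)) // true true
}
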
@@ -216,6 +264,13 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
 		}
 		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
 		return wrapReadCloser(readBufWrapper, cancel), nil
+	case Zstd:
+		zstdReader, err := zstd.NewReader(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader)
+		return readBufWrapper, nil
 	default:
 		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
 	}
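A round-trip sketch of the decoder used in the new Zstd case, assuming the github.com/klauspost/compress module is available; zstd.NewReader returns a *zstd.Decoder that satisfies io.Reader, which is why it can be handed to the pools wrapper above:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress a sample payload.
	var compressed bytes.Buffer
	enc, err := zstd.NewWriter(&compressed)
	if err != nil {
		panic(err)
	}
	enc.Write([]byte("hello zstd"))
	enc.Close()

	// Decompress it again through the Decoder's io.Reader interface.
	dec, err := zstd.NewReader(&compressed)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, _ := io.ReadAll(dec)
	fmt.Println(string(out)) // hello zstd
}
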
@@ -342,16 +397,70 @@ func (compression *Compression) Extension() string {
 		return "tar.gz"
 	case Xz:
 		return "tar.xz"
+	case Zstd:
+		return "tar.zst"
 	}
 	return ""
 }
 
+// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
+// prevent tar.FileInfoHeader from introspecting it and potentially calling into
+// glibc.
+type nosysFileInfo struct {
+	os.FileInfo
+}
+
+func (fi nosysFileInfo) Sys() interface{} {
+	// A Sys value of type *tar.Header is safe as it is system-independent.
+	// The tar.FileInfoHeader function copies the fields into the returned
+	// header without performing any OS lookups.
+	if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
+		return sys
+	}
+	return nil
+}
+
+// sysStat, if non-nil, populates hdr from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, hdr *tar.Header) error
+
+// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
+//
+// Compared to the archive/tar.FileInfoHeader function, this function is safe to
+// call from a chrooted process as it does not populate fields which would
+// require operating system lookups. It behaves identically to
+// tar.FileInfoHeader when fi is a FileInfo value returned from
+// tar.Header.FileInfo().
+//
+// When fi is a FileInfo for a native file, such as returned from os.Stat() and
+// os.Lstat(), the returned Header value differs from one returned from
+// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
+// set as OS lookups would be required to populate them. The AccessTime and
+// ChangeTime fields are not currently set (not yet implemented) although that
+// is subject to change. Callers which require the AccessTime or ChangeTime
+// fields to be zeroed should explicitly zero them out in the returned Header
+// value to avoid any compatibility issues in the future.
+func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
+	hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
+	if err != nil {
+		return nil, err
+	}
+	if sysStat != nil {
+		return hdr, sysStat(fi, hdr)
+	}
+	return hdr, nil
+}
+
 // FileInfoHeader creates a populated Header from fi.
-// Compared to archive pkg this function fills in more information.
-// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
-// which have been deleted since Go 1.9 archive/tar.
+//
+// Compared to the archive/tar package, this function fills in less information
+// but is safe to call from a chrooted process. The AccessTime and ChangeTime
+// fields are not set in the returned header, ModTime is truncated to one-second
+// precision, and the Uname and Gname fields are only set when fi is a FileInfo
+// value returned from tar.Header.FileInfo(). Also, regardless of Go version,
+// this function fills file type bits (e.g. hdr.Mode |= modeISDIR), which have
+// been deleted since Go 1.9 archive/tar.
 func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
-	hdr, err := tar.FileInfoHeader(fi, link)
+	hdr, err := FileInfoHeaderNoLookups(fi, link)
 	if err != nil {
 		return nil, err
 	}
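A standalone sketch of the idea behind nosysFileInfo: hiding Sys() from tar.FileInfoHeader keeps it away from system-dependent stat data, so it cannot trigger user/group name lookups (the temp file below is only for illustration):

package main

import (
	"archive/tar"
	"fmt"
	"os"
)

// nosysFileInfo mirrors the wrapper above, stubbed to always hide Sys().
type nosysFileInfo struct {
	os.FileInfo
}

func (fi nosysFileInfo) Sys() interface{} { return nil }

func main() {
	f, err := os.CreateTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	fi, err := os.Stat(f.Name())
	if err != nil {
		panic(err)
	}
	hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, "")
	if err != nil {
		panic(err)
	}
	// Uname/Gname stay empty because no OS lookup was performed.
	fmt.Printf("name=%s uname=%q gname=%q\n", hdr.Name, hdr.Uname, hdr.Gname)
}
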
@@ -361,9 +470,6 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
 	hdr.ChangeTime = time.Time{}
 	hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
 	hdr.Name = canonicalTarName(name, fi.IsDir())
-	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
-		return nil, err
-	}
 	return hdr, nil
 }
@@ -623,6 +729,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		}
 
 	case tar.TypeLink:
+		//#nosec G305 -- The target path is checked for path traversal.
 		targetPath := filepath.Join(extractDir, hdr.Linkname)
 		// check for hardlink breakout
 		if !strings.HasPrefix(targetPath, extractDir) {
@@ -635,7 +742,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 	case tar.TypeSymlink:
 		// 	path -> hdr.Linkname = targetPath
 		// 	e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
-		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) //#nosec G305 -- The target path is checked for path traversal.
 
 		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
 		// that symlink would first have to be created, which would be caught earlier, at this very check:
@@ -808,6 +915,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 		for _, include := range options.IncludeFiles {
 			rebaseName := options.RebaseNames[include]
 
+			var (
+				parentMatchInfo []fileutils.MatchInfo
+				parentDirs      []string
+			)
+
 			walkRoot := getWalkRoot(srcPath, include)
 			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
 				if err != nil {
@@ -834,11 +946,30 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 				// is asking for that file no matter what - which is true
 				// for some files, like .dockerignore and Dockerfile (sometimes)
 				if include != relFilePath {
-					skip, err = pm.Matches(relFilePath)
+					for len(parentDirs) != 0 {
+						lastParentDir := parentDirs[len(parentDirs)-1]
+						if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) {
+							break
+						}
+						parentDirs = parentDirs[:len(parentDirs)-1]
+						parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
+					}
+
+					var matchInfo fileutils.MatchInfo
+					if len(parentMatchInfo) != 0 {
+						skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1])
+					} else {
+						skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, fileutils.MatchInfo{})
+					}
 					if err != nil {
 						logrus.Errorf("Error matching %s: %v", relFilePath, err)
 						return err
 					}
+
+					if f.IsDir() {
+						parentDirs = append(parentDirs, relFilePath)
+						parentMatchInfo = append(parentMatchInfo, matchInfo)
+					}
 				}
 
 				if skip {
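The walk above keeps a stack of per-directory MatchInfo values so each path is matched against its immediate parent's cached results instead of re-walking every ancestor. A condensed sketch of the same caching pattern against the vendored fileutils package (the patterns are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	pm, err := fileutils.NewPatternMatcher([]string{"**/*.log", "!keep/*.log"})
	if err != nil {
		panic(err)
	}

	var (
		parentDirs      []string
		parentMatchInfo []fileutils.MatchInfo
	)
	filepath.Walk(".", func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Pop stack entries that are not ancestors of the current path.
		for len(parentDirs) != 0 {
			last := parentDirs[len(parentDirs)-1]
			if strings.HasPrefix(path, last+string(os.PathSeparator)) {
				break
			}
			parentDirs = parentDirs[:len(parentDirs)-1]
			parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
		}
		parent := fileutils.MatchInfo{}
		if len(parentMatchInfo) != 0 {
			parent = parentMatchInfo[len(parentMatchInfo)-1]
		}
		skip, info, err := pm.MatchesUsingParentResults(path, parent)
		if err != nil {
			return err
		}
		if f.IsDir() {
			parentDirs = append(parentDirs, path)
			parentMatchInfo = append(parentMatchInfo, info)
		}
		fmt.Println(path, "skip:", skip)
		return nil
	})
}
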
@@ -964,6 +1095,7 @@ loop:
 			}
 		}
 
+		//#nosec G305 -- The joined path is checked for path traversal.
 		path := filepath.Join(dest, hdr.Name)
 		rel, err := filepath.Rel(dest, path)
 		if err != nil {
@@ -1028,6 +1160,7 @@ loop:
 	}
 
 	for _, hdr := range dirs {
+		//#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
 		path := filepath.Join(dest, hdr.Name)
 
 		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
@@ -1170,7 +1303,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 	}
 	defer srcF.Close()
 
-	hdr, err := tar.FileInfoHeader(srcSt, "")
+	hdr, err := FileInfoHeaderNoLookups(srcSt, "")
 	if err != nil {
 		return err
 	}
@@ -1261,7 +1394,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
 // of that file as an archive. The archive can only be read once - as soon as reading completes,
 // the file will be deleted.
 func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
-	f, err := ioutil.TempFile(dir, "")
+	f, err := os.CreateTemp(dir, "")
 	if err != nil {
 		return nil, err
 	}

@@ -59,7 +59,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
 			Gname:      hdr.Gname,
 			AccessTime: hdr.AccessTime,
 			ChangeTime: hdr.ChangeTime,
-		}
+		} //#nosec G305 -- An archive is being created, not extracted.
 	}
 }

@ -1,3 +1,4 @@
|
||||||
|
//go:build !linux
|
||||||
// +build !linux
|
// +build !linux
|
||||||
|
|
||||||
package archive // import "github.com/docker/docker/pkg/archive"
|
package archive // import "github.com/docker/docker/pkg/archive"
|
||||||
|
|
|
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package archive // import "github.com/docker/docker/pkg/archive"
@@ -16,6 +17,10 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+func init() {
+	sysStat = statUnix
+}
+
 // fixVolumePathPrefix does platform specific processing to ensure that if
 // the path being passed in is not in a volume path format, convert it to one.
 func fixVolumePathPrefix(srcPath string) string {
@@ -44,19 +49,24 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
 	return perm // noop for unix as golang APIs provide perm bits correctly
 }
 
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
-	s, ok := stat.(*syscall.Stat_t)
+// statUnix populates hdr from system-dependent fields of fi without performing
+// any OS lookups.
+func statUnix(fi os.FileInfo, hdr *tar.Header) error {
+	s, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return nil
+	}
+
+	hdr.Uid = int(s.Uid)
+	hdr.Gid = int(s.Gid)
 
-	if ok {
-		// Currently go does not fill in the major/minors
 	if s.Mode&unix.S_IFBLK != 0 ||
 		s.Mode&unix.S_IFCHR != 0 {
 		hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
 		hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
 	}
-	}
 
-	return
+	return nil
 }
 
 func getInodeFromStat(stat interface{}) (inode uint64, err error) {
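The init hook above is the usual Go pattern for platform-specific behavior: a package-level function variable that a build-constrained file assigns in init(). A tiny standalone sketch with stubbed types (names are illustrative):

package main

import "fmt"

type header struct{ uid, gid int }

// sysStat, if non-nil, fills in platform-specific fields; the generic
// code only ever calls it through this variable.
var sysStat func(h *header) error

func init() {
	// A build-constrained file would assign the real implementation here;
	// this stub stands in for statUnix.
	sysStat = func(h *header) error {
		h.uid, h.gid = 1000, 1000
		return nil
	}
}

func main() {
	h := &header{}
	if sysStat != nil {
		if err := sysStat(h); err != nil {
			panic(err)
		}
	}
	fmt.Printf("uid=%d gid=%d\n", h.uid, h.gid)
}
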
@@ -5,7 +5,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
@@ -348,7 +347,7 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) {
 		oldRoot, newRoot *FileInfo
 	)
 	if oldDir == "" {
-		emptyDir, err := ioutil.TempDir("", "empty")
+		emptyDir, err := os.MkdirTemp("", "empty")
 		if err != nil {
 			return nil, err
 		}

@ -1,3 +1,4 @@
|
||||||
|
//go:build !linux
|
||||||
// +build !linux
|
// +build !linux
|
||||||
|
|
||||||
package archive // import "github.com/docker/docker/pkg/archive"
|
package archive // import "github.com/docker/docker/pkg/archive"
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package archive // import "github.com/docker/docker/pkg/archive"
|
package archive // import "github.com/docker/docker/pkg/archive"
|
||||||
|
|
|
@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"errors"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -261,7 +260,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
 		// The destination exists as a directory. No alteration
 		// to srcContent is needed as its contents can be
 		// simply extracted to the destination directory.
-		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+		return dstInfo.Path, io.NopCloser(srcContent), nil
 	case dstInfo.Exists && srcInfo.IsDir:
 		// The destination exists as some type of file and the source
 		// content is a directory. This is an error condition since

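Like ReadAll, NopCloser moved from io/ioutil to io in Go 1.16: it wraps a plain io.Reader with a no-op Close. A one-screen sketch:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.NopCloser turns any io.Reader into an io.ReadCloser whose Close
	// does nothing, matching the deprecated ioutil.NopCloser.
	var rc io.ReadCloser = io.NopCloser(strings.NewReader("archive bytes"))
	defer rc.Close()
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b))
}
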
@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package archive // import "github.com/docker/docker/pkg/archive"
|
package archive // import "github.com/docker/docker/pkg/archive"
|
||||||
|
|
|
@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -100,7 +99,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 			basename := filepath.Base(hdr.Name)
 			aufsHardlinks[basename] = hdr
 			if aufsTempdir == "" {
-				if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+				if aufsTempdir, err = os.MkdirTemp("", "dockerplnk"); err != nil {
 					return 0, err
 				}
 				defer os.RemoveAll(aufsTempdir)
@@ -114,6 +113,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 				continue
 			}
 		}
+		//#nosec G305 -- The joined path is guarded against path traversal.
 		path := filepath.Join(dest, hdr.Name)
 		rel, err := filepath.Rel(dest, path)
 		if err != nil {
@@ -210,6 +210,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 	}
 
 	for _, hdr := range dirs {
+		//#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
 		path := filepath.Join(dest, hdr.Name)
 		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
 			return 0, err

@ -1,3 +1,4 @@
|
||||||
|
//go:build !linux
|
||||||
// +build !linux
|
// +build !linux
|
||||||
|
|
||||||
package archive // import "github.com/docker/docker/pkg/archive"
|
package archive // import "github.com/docker/docker/pkg/archive"
|
||||||
|
|
|
@@ -9,8 +9,30 @@ import (
 	"regexp"
 	"strings"
 	"text/scanner"
+	"unicode/utf8"
 )
 
+// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex.
+var escapeBytes [8]byte
+
+// shouldEscape reports whether a rune should be escaped as part of the regex.
+//
+// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters.
+// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator
+// on Windows.
+//
+// Adapted from regexp::QuoteMeta in go stdlib.
+// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2
+func shouldEscape(b rune) bool {
+	return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0
+}
+
+func init() {
+	for _, b := range []byte(`.+()|{}$`) {
+		escapeBytes[b%8] |= 1 << (b / 8)
+	}
+}
+
 // PatternMatcher allows checking paths against a list of patterns
 type PatternMatcher struct {
 	patterns []*Pattern
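The escapeBytes table is a transposed bitmap adapted from regexp.QuoteMeta: code point b maps to bit b/8 of byte b%8, so membership is one index plus one mask. A standalone sketch of the same lookup (sample characters are illustrative):

package main

import "fmt"

// Same transposed bitmap as above.
var escapeBytes [8]byte

func init() {
	for _, b := range []byte(`.+()$`) {
		escapeBytes[b%8] |= 1 << (b / 8)
	}
}

// shouldEscape mirrors the lookup above; 128 stands in for utf8.RuneSelf.
func shouldEscape(r rune) bool {
	return r < 128 && escapeBytes[r%8]&(1<<(r/8)) != 0
}

func main() {
	for _, r := range ".+a*$" {
		fmt.Printf("%q -> %v\n", r, shouldEscape(r))
	}
	// '.' -> true, '+' -> true, 'a' -> false, '*' -> false, '$' -> true
}
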
@@ -55,8 +77,16 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
 	return pm, nil
 }
 
-// Matches matches path against all the patterns. Matches is not safe to be
-// called concurrently
+// Matches returns true if "file" matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+//
+// The "file" argument should be a slash-delimited path.
+//
+// Matches is not safe to call concurrently.
+//
+// Deprecated: This implementation is buggy (it only checks a single parent dir
+// against the pattern) and will be removed soon. Use either
+// MatchesOrParentMatches or MatchesUsingParentResults instead.
 func (pm *PatternMatcher) Matches(file string) (bool, error) {
 	matched := false
 	file = filepath.FromSlash(file)
@@ -64,10 +94,11 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) {
 	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
 
 	for _, pattern := range pm.patterns {
-		negative := false
-
-		if pattern.exclusion {
-			negative = true
+		// Skip evaluation if this is an inclusion and the filename
+		// already matched the pattern, or it's an exclusion and it has
+		// not matched the pattern yet.
+		if pattern.exclusion != matched {
+			continue
 		}
 
 		match, err := pattern.match(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
if match {
|
if match {
|
||||||
matched = !negative
|
matched = !pattern.exclusion
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return matched, nil
|
return matched, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MatchesOrParentMatches returns true if "file" matches any of the patterns
|
||||||
|
// and isn't excluded by any of the subsequent patterns.
|
||||||
|
//
|
||||||
|
// The "file" argument should be a slash-delimited path.
|
||||||
|
//
|
||||||
|
// Matches is not safe to call concurrently.
|
||||||
|
func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) {
|
||||||
|
matched := false
|
||||||
|
file = filepath.FromSlash(file)
|
||||||
|
parentPath := filepath.Dir(file)
|
||||||
|
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
|
||||||
|
|
||||||
|
for _, pattern := range pm.patterns {
|
||||||
|
// Skip evaluation if this is an inclusion and the filename
|
||||||
|
// already matched the pattern, or it's an exclusion and it has
|
||||||
|
// not matched the pattern yet.
|
||||||
|
if pattern.exclusion != matched {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
match, err := pattern.match(file)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !match && parentPath != "." {
|
||||||
|
// Check to see if the pattern matches one of our parent dirs.
|
||||||
|
for i := range parentPathDirs {
|
||||||
|
match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
|
||||||
|
if match {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if match {
|
||||||
|
matched = !pattern.exclusion
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return matched, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchesUsingParentResult returns true if "file" matches any of the patterns
|
||||||
|
// and isn't excluded by any of the subsequent patterns. The functionality is
|
||||||
|
// the same as Matches, but as an optimization, the caller keeps track of
|
||||||
|
// whether the parent directory matched.
|
||||||
|
//
|
||||||
|
// The "file" argument should be a slash-delimited path.
|
||||||
|
//
|
||||||
|
// MatchesUsingParentResult is not safe to call concurrently.
|
||||||
|
//
|
||||||
|
// Deprecated: this function does behave correctly in some cases (see
|
||||||
|
// https://github.com/docker/buildx/issues/850).
|
||||||
|
//
|
||||||
|
// Use MatchesUsingParentResults instead.
|
||||||
|
func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) {
|
||||||
|
matched := parentMatched
|
||||||
|
file = filepath.FromSlash(file)
|
||||||
|
|
||||||
|
for _, pattern := range pm.patterns {
|
||||||
|
// Skip evaluation if this is an inclusion and the filename
|
||||||
|
// already matched the pattern, or it's an exclusion and it has
|
||||||
|
// not matched the pattern yet.
|
||||||
|
if pattern.exclusion != matched {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
match, err := pattern.match(file)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if match {
|
||||||
|
matched = !pattern.exclusion
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return matched, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchInfo tracks information about parent dir matches while traversing a
|
||||||
|
// filesystem.
|
||||||
|
type MatchInfo struct {
|
||||||
|
parentMatched []bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchesUsingParentResults returns true if "file" matches any of the patterns
|
||||||
|
// and isn't excluded by any of the subsequent patterns. The functionality is
|
||||||
|
// the same as Matches, but as an optimization, the caller passes in
|
||||||
|
// intermediate results from matching the parent directory.
|
||||||
|
//
|
||||||
|
// The "file" argument should be a slash-delimited path.
|
||||||
|
//
|
||||||
|
// MatchesUsingParentResults is not safe to call concurrently.
|
||||||
|
func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) {
|
||||||
|
parentMatched := parentMatchInfo.parentMatched
|
||||||
|
if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) {
|
||||||
|
return false, MatchInfo{}, errors.New("wrong number of values in parentMatched")
|
||||||
|
}
|
||||||
|
|
||||||
|
file = filepath.FromSlash(file)
|
||||||
|
matched := false
|
||||||
|
|
||||||
|
matchInfo := MatchInfo{
|
||||||
|
parentMatched: make([]bool, len(pm.patterns)),
|
||||||
|
}
|
||||||
|
for i, pattern := range pm.patterns {
|
||||||
|
match := false
|
||||||
|
// If the parent matched this pattern, we don't need to recheck.
|
||||||
|
if len(parentMatched) != 0 {
|
||||||
|
match = parentMatched[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
if !match {
|
||||||
|
// Skip evaluation if this is an inclusion and the filename
|
||||||
|
// already matched the pattern, or it's an exclusion and it has
|
||||||
|
// not matched the pattern yet.
|
||||||
|
if pattern.exclusion != matched {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
match, err = pattern.match(file)
|
||||||
|
if err != nil {
|
||||||
|
return false, matchInfo, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the zero value of MatchInfo was passed in, we don't have
|
||||||
|
// any information about the parent dir's match results, and we
|
||||||
|
// apply the same logic as MatchesOrParentMatches.
|
||||||
|
if !match && len(parentMatched) == 0 {
|
||||||
|
if parentPath := filepath.Dir(file); parentPath != "." {
|
||||||
|
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
|
||||||
|
// Check to see if the pattern matches one of our parent dirs.
|
||||||
|
for i := range parentPathDirs {
|
||||||
|
match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
|
||||||
|
if match {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
matchInfo.parentMatched[i] = match
|
||||||
|
|
||||||
|
if match {
|
||||||
|
matched = !pattern.exclusion
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return matched, matchInfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Exclusions returns true if any of the patterns define exclusions
|
// Exclusions returns true if any of the patterns define exclusions
|
||||||
func (pm *PatternMatcher) Exclusions() bool {
|
func (pm *PatternMatcher) Exclusions() bool {
|
||||||
return pm.exclusions
|
return pm.exclusions
|
||||||
|
@@ -102,12 +285,23 @@ func (pm *PatternMatcher) Patterns() []*Pattern {
 
 // Pattern defines a single regexp used to filter file paths.
 type Pattern struct {
+	matchType      matchType
 	cleanedPattern string
 	dirs           []string
 	regexp         *regexp.Regexp
 	exclusion      bool
 }
 
+type matchType int
+
+const (
+	unknownMatch matchType = iota
+	exactMatch
+	prefixMatch
+	suffixMatch
+	regexpMatch
+)
+
 func (p *Pattern) String() string {
 	return p.cleanedPattern
 }
@@ -118,19 +312,34 @@ func (p *Pattern) Exclusion() bool {
 }
 
 func (p *Pattern) match(path string) (bool, error) {
-	if p.regexp == nil {
-		if err := p.compile(); err != nil {
+	if p.matchType == unknownMatch {
+		if err := p.compile(string(os.PathSeparator)); err != nil {
 			return false, filepath.ErrBadPattern
 		}
 	}
 
-	b := p.regexp.MatchString(path)
+	switch p.matchType {
+	case exactMatch:
+		return path == p.cleanedPattern, nil
+	case prefixMatch:
+		// strip trailing **
+		return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil
+	case suffixMatch:
+		// strip leading **
+		suffix := p.cleanedPattern[2:]
+		if strings.HasSuffix(path, suffix) {
+			return true, nil
+		}
+		// **/foo matches "foo"
+		return suffix[0] == os.PathSeparator && path == suffix[1:], nil
+	case regexpMatch:
+		return p.regexp.MatchString(path), nil
+	}
 
-	return b, nil
+	return false, nil
 }
 
-func (p *Pattern) compile() error {
+func (p *Pattern) compile(sl string) error {
 	regStr := "^"
 	pattern := p.cleanedPattern
 	// Go through the pattern and convert it to a regexp.
@@ -138,13 +347,13 @@ func (p *Pattern) compile() error {
 	var scan scanner.Scanner
 	scan.Init(strings.NewReader(pattern))
 
-	sl := string(os.PathSeparator)
 	escSL := sl
 	if sl == `\` {
 		escSL += `\`
 	}
 
-	for scan.Peek() != scanner.EOF {
+	p.matchType = exactMatch
+	for i := 0; scan.Peek() != scanner.EOF; i++ {
 		ch := scan.Next()
 
 		if ch == '*' {
@@ -159,21 +368,33 @@ func (p *Pattern) compile() error {
 
 			if scan.Peek() == scanner.EOF {
 				// is "**EOF" - to align with .gitignore just accept all
-				regStr += ".*"
+				if p.matchType == exactMatch {
+					p.matchType = prefixMatch
+				} else {
+					regStr += ".*"
+					p.matchType = regexpMatch
+				}
 			} else {
 				// is "**"
 				// Note that this allows for any # of /'s (even 0) because
 				// the .* will eat everything, even /'s
 				regStr += "(.*" + escSL + ")?"
+				p.matchType = regexpMatch
+			}
+
+			if i == 0 {
+				p.matchType = suffixMatch
 			}
 		} else {
 			// is "*" so map it to anything but "/"
 			regStr += "[^" + escSL + "]*"
+			p.matchType = regexpMatch
 		}
 	} else if ch == '?' {
 		// "?" is any char except "/"
 		regStr += "[^" + escSL + "]"
-	} else if ch == '.' || ch == '$' {
+		p.matchType = regexpMatch
+	} else if shouldEscape(ch) {
 		// Escape some regexp special chars that have no meaning
 		// in golang's filepath.Match
 		regStr += `\` + string(ch)
@@ -189,14 +410,22 @@ func (p *Pattern) compile() error {
 		}
 		if scan.Peek() != scanner.EOF {
 			regStr += `\` + string(scan.Next())
+			p.matchType = regexpMatch
 		} else {
 			regStr += `\`
 		}
+	} else if ch == '[' || ch == ']' {
+		regStr += string(ch)
+		p.matchType = regexpMatch
 	} else {
 		regStr += string(ch)
 	}
 }
 
+	if p.matchType != regexpMatch {
+		return nil
+	}
+
 	regStr += "$"
 
 	re, err := regexp.Compile(regStr)
@@ -205,11 +434,15 @@ func (p *Pattern) compile() error {
 	}
 
 	p.regexp = re
+	p.matchType = regexpMatch
 	return nil
 }
 
 // Matches returns true if file matches any of the patterns
 // and isn't excluded by any of the subsequent patterns.
+//
+// This implementation is buggy (it only checks a single parent dir against the
+// pattern) and will be removed soon. Use MatchesOrParentMatches instead.
 func Matches(file string, patterns []string) (bool, error) {
 	pm, err := NewPatternMatcher(patterns)
 	if err != nil {
@@ -225,6 +458,23 @@ func Matches(file string, patterns []string) (bool, error) {
 	return pm.Matches(file)
 }
 
+// MatchesOrParentMatches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func MatchesOrParentMatches(file string, patterns []string) (bool, error) {
+	pm, err := NewPatternMatcher(patterns)
+	if err != nil {
+		return false, err
+	}
+	file = filepath.Clean(file)
+
+	if file == "." {
+		// Don't let them exclude everything, kind of silly.
+		return false, nil
+	}
+
+	return pm.MatchesOrParentMatches(file)
+}
+
 // CopyFile copies from src to dst until either EOF is reached
 // on src or an error occurs. It verifies src exists and removes
 // the dst if it exists.

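MatchesOrParentMatches is the corrected replacement for the package-level Matches, which (per the deprecation note above) only checks a single parent dir; the new function walks every ancestor directory against each pattern. A usage sketch against the vendored package (patterns are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	patterns := []string{"docs", "!docs/README.md"}
	// "docs/api/index.md" matches because its ancestor "docs" matches,
	// and the negation only carves out docs/README.md.
	m, err := fileutils.MatchesOrParentMatches("docs/api/index.md", patterns)
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // true
}
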
@ -1,10 +1,10 @@
|
||||||
|
//go:build linux || freebsd
|
||||||
// +build linux freebsd
|
// +build linux freebsd
|
||||||
|
|
||||||
package fileutils // import "github.com/docker/docker/pkg/fileutils"
|
package fileutils // import "github.com/docker/docker/pkg/fileutils"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
@@ -13,7 +13,7 @@ import (
 // GetTotalUsedFds Returns the number of used File Descriptors by
 // reading it via /proc filesystem.
 func GetTotalUsedFds() int {
-	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+	if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
 		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
 	} else {
 		return len(fds)

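os.ReadDir is the Go 1.16 replacement used above; it returns lightweight fs.DirEntry values instead of fs.FileInfo. A sketch of the same fd count (Linux procfs assumed, hence the fallback):

package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid()))
	if err != nil {
		// /proc is Linux-specific; other platforms land here.
		fmt.Println("no procfs:", err)
		return
	}
	fmt.Println("open fds:", len(entries))
}
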
@ -1,3 +1,4 @@
|
||||||
|
//go:build !linux
|
||||||
// +build !linux
|
// +build !linux
|
||||||
|
|
||||||
package homedir // import "github.com/docker/docker/pkg/homedir"
|
package homedir // import "github.com/docker/docker/pkg/homedir"
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package homedir // import "github.com/docker/docker/pkg/homedir"
|
package homedir // import "github.com/docker/docker/pkg/homedir"
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
package idtools // import "github.com/docker/docker/pkg/idtools"
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !linux
|
||||||
// +build !linux
|
// +build !linux
|
||||||
|
|
||||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
package idtools // import "github.com/docker/docker/pkg/idtools"
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
package idtools // import "github.com/docker/docker/pkg/idtools"
|
||||||
|
|
|
@@ -50,12 +50,12 @@ func NewBytesPipe() *BytesPipe {
 // It can allocate new []byte slices in a process of writing.
 func (bp *BytesPipe) Write(p []byte) (int, error) {
 	bp.mu.Lock()
+	defer bp.mu.Unlock()
 
 	written := 0
 loop0:
 	for {
 		if bp.closeErr != nil {
-			bp.mu.Unlock()
 			return written, ErrClosed
 		}
 
@@ -72,7 +72,6 @@ loop0:
 		// errBufferFull is an error we expect to get if the buffer is full
 		if err != nil && err != errBufferFull {
 			bp.wait.Broadcast()
-			bp.mu.Unlock()
 			return written, err
 		}
 
@@ -100,7 +99,6 @@ loop0:
 		bp.buf = append(bp.buf, getBuffer(nextCap))
 	}
 	bp.wait.Broadcast()
-	bp.mu.Unlock()
 	return written, nil
 }
 
@@ -126,17 +124,14 @@ func (bp *BytesPipe) Close() error {
 // Data could be read only once.
 func (bp *BytesPipe) Read(p []byte) (n int, err error) {
 	bp.mu.Lock()
+	defer bp.mu.Unlock()
 	if bp.bufLen == 0 {
 		if bp.closeErr != nil {
-			err := bp.closeErr
-			bp.mu.Unlock()
-			return 0, err
+			return 0, bp.closeErr
 		}
 		bp.wait.Wait()
 		if bp.bufLen == 0 && bp.closeErr != nil {
-			err := bp.closeErr
-			bp.mu.Unlock()
-			return 0, err
+			return 0, bp.closeErr
 		}
 	}
 
@@ -161,7 +156,6 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) {
 	}
 
 	bp.wait.Broadcast()
-	bp.mu.Unlock()
 	return
 }

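The BytesPipe hunks replace per-return-path unlocks with a single deferred unlock. This stays correct around the condition variable because sync.Cond.Wait releases the mutex while blocked and reacquires it before returning. A minimal standalone sketch of the pattern:

package main

import (
	"fmt"
	"sync"
)

type pipe struct {
	mu   sync.Mutex
	wait *sync.Cond
	data []byte
}

// read uses one deferred Unlock instead of unlocking on every return
// path; Cond.Wait drops the lock while blocked, so this is safe.
func (p *pipe) read() []byte {
	p.mu.Lock()
	defer p.mu.Unlock()
	for len(p.data) == 0 {
		p.wait.Wait()
	}
	d := p.data
	p.data = nil
	return d
}

func main() {
	p := &pipe{}
	p.wait = sync.NewCond(&p.mu)
	go func() {
		p.mu.Lock()
		p.data = []byte("hello")
		p.wait.Broadcast()
		p.mu.Unlock()
	}()
	fmt.Println(string(p.read()))
}
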
@@ -2,7 +2,6 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils"
 
 import (
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 )
@@ -11,7 +10,7 @@ import (
 // temporary file and closing it atomically changes the temporary file to
 // destination path. Writing and closing concurrently is not allowed.
 func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
-	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+	f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
 	if err != nil {
 		return nil, err
 	}
@@ -94,7 +93,7 @@ type AtomicWriteSet struct {
 // commit. If no temporary directory is given the system
 // default is used.
 func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
-	td, err := ioutil.TempDir(tmpDir, "write-set-")
+	td, err := os.MkdirTemp(tmpDir, "write-set-")
 	if err != nil {
 		return nil, err
 	}

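NewAtomicFileWriter's temp-file-then-rename scheme is unchanged; only the temp file creation moved from ioutil.TempFile to os.CreateTemp. A condensed sketch of the same atomic-write idea (file name and error handling simplified for illustration):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeAtomic writes to a temp file in the destination's directory,
// then renames it over the destination so readers never see a partial file.
func writeAtomic(filename string, data []byte, perm os.FileMode) error {
	f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		os.Remove(f.Name())
		return err
	}
	if err := f.Chmod(perm); err != nil {
		f.Close()
		os.Remove(f.Name())
		return err
	}
	if err := f.Close(); err != nil {
		os.Remove(f.Name())
		return err
	}
	return os.Rename(f.Name(), filename)
}

func main() {
	if err := writeAtomic("demo.txt", []byte("payload"), 0o644); err != nil {
		panic(err)
	}
	fmt.Println("written atomically")
}
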
@@ -2,9 +2,12 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils"
 
 import (
 	"context"
-	"crypto/sha256"
-	"encoding/hex"
 	"io"
+
+	// make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered
+	// TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged.
+	_ "crypto/sha256"
+	_ "crypto/sha512"
 )
 
 // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser
@@ -49,15 +52,6 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
 	}
 }
 
-// HashData returns the sha256 sum of src.
-func HashData(src io.Reader) (string, error) {
-	h := sha256.New()
-	if _, err := io.Copy(h, src); err != nil {
-		return "", err
-	}
-	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
-}
-
 // OnEOFReader wraps an io.ReadCloser and a function
 // the function will run at the end of file or close the file.
 type OnEOFReader struct {

@@ -1,10 +1,11 @@
+//go:build !windows
 // +build !windows
 
 package ioutils // import "github.com/docker/docker/pkg/ioutils"
 
-import "io/ioutil"
+import "os"
 
-// TempDir on Unix systems is equivalent to ioutil.TempDir.
+// TempDir on Unix systems is equivalent to os.MkdirTemp.
 func TempDir(dir, prefix string) (string, error) {
-	return ioutil.TempDir(dir, prefix)
+	return os.MkdirTemp(dir, prefix)
 }

@@ -1,14 +1,14 @@
 package ioutils // import "github.com/docker/docker/pkg/ioutils"
 
 import (
-	"io/ioutil"
+	"os"
 
 	"github.com/docker/docker/pkg/longpath"
 )
 
-// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format.
 func TempDir(dir, prefix string) (string, error) {
-	tempDir, err := ioutil.TempDir(dir, prefix)
+	tempDir, err := os.MkdirTemp(dir, prefix)
 	if err != nil {
 		return "", err
 	}

@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package system // import "github.com/docker/docker/pkg/system"
|
package system // import "github.com/docker/docker/pkg/system"
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package system // import "github.com/docker/docker/pkg/system"
|
package system // import "github.com/docker/docker/pkg/system"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
|
@ -63,5 +63,5 @@ func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, erro
|
||||||
// to find the pathname of the file. It is the caller's responsibility
|
// to find the pathname of the file. It is the caller's responsibility
|
||||||
// to remove the file when no longer needed.
|
// to remove the file when no longer needed.
|
||||||
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
|
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
|
||||||
return ioutil.TempFile(dir, prefix)
|
return os.CreateTemp(dir, prefix)
|
||||||
}
|
}
|
||||||
|
|
|
@ -258,7 +258,7 @@ func nextSuffix() string {
|
||||||
return strconv.Itoa(int(1e9 + r%1e9))[1:]
|
return strconv.Itoa(int(1e9 + r%1e9))[1:]
|
||||||
}
|
}
|
||||||
|
|
||||||
// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
|
// TempFileSequential is a copy of os.CreateTemp, modified to use sequential
|
||||||
// file access. Below is the original comment from golang:
|
// file access. Below is the original comment from golang:
|
||||||
// TempFile creates a new temporary file in the directory dir
|
// TempFile creates a new temporary file in the directory dir
|
||||||
// with a name beginning with prefix, opens the file for reading
|
// with a name beginning with prefix, opens the file for reading
|
||||||
|
|
|
@@ -1,29 +1,18 @@
 package system // import "github.com/docker/docker/pkg/system"
 
-import (
-	"os"
-
-	"github.com/sirupsen/logrus"
-)
-
 var (
-	// containerdRuntimeSupported determines if ContainerD should be the runtime.
-	// As of March 2019, this is an experimental feature.
+	// containerdRuntimeSupported determines if containerd should be the runtime.
 	containerdRuntimeSupported = false
 )
 
-// InitContainerdRuntime sets whether to use ContainerD for runtime
-// on Windows. This is an experimental feature still in development, and
-// also requires an environment variable to be set (so as not to turn the
-// feature on from simply experimental which would also mean LCOW.
-func InitContainerdRuntime(experimental bool, cdPath string) {
-	if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 {
-		logrus.Warnf("Using ContainerD runtime. This feature is experimental")
+// InitContainerdRuntime sets whether to use containerd for runtime on Windows.
+func InitContainerdRuntime(cdPath string) {
+	if len(cdPath) > 0 {
 		containerdRuntimeSupported = true
 	}
 }
 
-// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
+// ContainerdRuntimeSupported returns true if the use of containerd runtime is supported.
 func ContainerdRuntimeSupported() bool {
 	return containerdRuntimeSupported
 }

@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package system // import "github.com/docker/docker/pkg/system"
|
package system // import "github.com/docker/docker/pkg/system"
|
||||||
|
|
|
@ -6,8 +6,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
units "github.com/docker/go-units"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ReadMemInfo retrieves memory statistics of the host system and returns a
|
// ReadMemInfo retrieves memory statistics of the host system and returns a
|
||||||
|
@ -42,7 +40,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
bytes := int64(size) * units.KiB
|
// Convert to KiB
|
||||||
|
bytes := int64(size) * 1024
|
||||||
|
|
||||||
switch parts[0] {
|
switch parts[0] {
|
||||||
case "MemTotal:":
|
case "MemTotal:":
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !linux && !windows
|
||||||
// +build !linux,!windows
|
// +build !linux,!windows
|
||||||
|
|
||||||
package system // import "github.com/docker/docker/pkg/system"
|
package system // import "github.com/docker/docker/pkg/system"
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package system // import "github.com/docker/docker/pkg/system"
|
package system // import "github.com/docker/docker/pkg/system"
|
||||||
|
@ -6,12 +7,6 @@ import (
|
||||||
"golang.org/x/sys/unix"
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Mknod creates a filesystem node (file, device special file or named pipe) named path
|
|
||||||
// with attributes specified by mode and dev.
|
|
||||||
func Mknod(path string, mode uint32, dev int) error {
|
|
||||||
return unix.Mknod(path, mode, dev)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
|
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
|
||||||
// and minor number of the newly created device special file.
|
// and minor number of the newly created device special file.
|
||||||
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
|
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
|
||||||
|
|
|
@@ -0,0 +1,14 @@
+//go:build freebsd
+// +build freebsd
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+	return unix.Mknod(path, mode, uint64(dev))
+}
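Note: the per-platform split exists because golang.org/x/sys/unix declares Mknod with a uint64 device number on FreeBSD but an int-compatible one on the other Unix platforms; the wrapper keeps a single int-based signature. A hedged usage sketch (path and device numbers are made up; creating device nodes requires root):

	// unix.Mkdev packs a major/minor pair into a device number.
	dev := int(unix.Mkdev(1, 3))
	if err := system.Mknod("/tmp/example-node", unix.S_IFCHR|0o666, dev); err != nil {
		log.Fatal(err)
	}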
@@ -0,0 +1,14 @@
+//go:build !freebsd && !windows
+// +build !freebsd,!windows
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+	return unix.Mknod(path, mode, dev)
+}
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package system // import "github.com/docker/docker/pkg/system"
@@ -1,10 +1,11 @@
+//go:build linux || freebsd || darwin
 // +build linux freebsd darwin
 
 package system // import "github.com/docker/docker/pkg/system"
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"strings"
 	"syscall"
 
@@ -30,7 +31,7 @@ func KillProcess(pid int) {
 // http://man7.org/linux/man-pages/man5/proc.5.html
 func IsProcessZombie(pid int) (bool, error) {
 	statPath := fmt.Sprintf("/proc/%d/stat", pid)
-	dataBytes, err := ioutil.ReadFile(statPath)
+	dataBytes, err := os.ReadFile(statPath)
 	if err != nil {
 		return false, err
 	}
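Note: io/ioutil is deprecated since Go 1.16, and ioutil.ReadFile is now a thin wrapper around os.ReadFile, so this swap is behaviour-preserving. The migration pattern, in short:

	// Before (deprecated):  dataBytes, err := ioutil.ReadFile(statPath)
	// After (Go 1.16+), identical semantics:
	dataBytes, err := os.ReadFile(statPath)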
@@ -1,78 +0,0 @@
-// +build !darwin,!windows
-
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
-	"os"
-	"syscall"
-	"time"
-
-	"github.com/moby/sys/mount"
-	"github.com/pkg/errors"
-)
-
-// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
-// often be remedied.
-// Only use `EnsureRemoveAll` if you really want to make every effort to remove
-// a directory.
-//
-// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
-// can be a race between reading directory entries and then actually attempting
-// to remove everything in the directory.
-// These types of errors do not need to be returned since it's ok for the dir to
-// be gone we can just retry the remove operation.
-//
-// This should not return a `os.ErrNotExist` kind of error under any circumstances
-func EnsureRemoveAll(dir string) error {
-	notExistErr := make(map[string]bool)
-
-	// track retries
-	exitOnErr := make(map[string]int)
-	maxRetry := 50
-
-	// Attempt to unmount anything beneath this dir first
-	mount.RecursiveUnmount(dir)
-
-	for {
-		err := os.RemoveAll(dir)
-		if err == nil {
-			return nil
-		}
-
-		pe, ok := err.(*os.PathError)
-		if !ok {
-			return err
-		}
-
-		if os.IsNotExist(err) {
-			if notExistErr[pe.Path] {
-				return err
-			}
-			notExistErr[pe.Path] = true
-
-			// There is a race where some subdir can be removed but after the parent
-			// dir entries have been read.
-			// So the path could be from `os.Remove(subdir)`
-			// If the reported non-existent path is not the passed in `dir` we
-			// should just retry, but otherwise return with no error.
-			if pe.Path == dir {
-				return nil
-			}
-			continue
-		}
-
-		if pe.Err != syscall.EBUSY {
-			return err
-		}
-
-		if e := mount.Unmount(pe.Path); e != nil {
-			return errors.Wrapf(e, "error while removing %s", dir)
-		}
-
-		if exitOnErr[pe.Path] == maxRetry {
-			return err
-		}
-		exitOnErr[pe.Path]++
-		time.Sleep(100 * time.Millisecond)
-	}
-}
@@ -1,6 +0,0 @@
-package system
-
-import "os"
-
-// EnsureRemoveAll is an alias to os.RemoveAll on Windows
-var EnsureRemoveAll = os.RemoveAll
@@ -1,3 +1,4 @@
+//go:build freebsd || netbsd
 // +build freebsd netbsd
 
 package system // import "github.com/docker/docker/pkg/system"
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package system // import "github.com/docker/docker/pkg/system"
@@ -1,65 +1,23 @@
 package system // import "github.com/docker/docker/pkg/system"
 
-import (
-	"unsafe"
-
-	"github.com/sirupsen/logrus"
-	"golang.org/x/sys/windows"
-)
+import "golang.org/x/sys/windows"
 
 const (
 	// Deprecated: use github.com/docker/pkg/idtools.SeTakeOwnershipPrivilege
 	SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
-)
-
-const (
 	// Deprecated: use github.com/docker/pkg/idtools.ContainerAdministratorSidString
 	ContainerAdministratorSidString = "S-1-5-93-2-1"
 	// Deprecated: use github.com/docker/pkg/idtools.ContainerUserSidString
 	ContainerUserSidString = "S-1-5-93-2-2"
 )
 
-var (
-	ntuserApiset      = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
-	procGetVersionExW = modkernel32.NewProc("GetVersionExW")
-)
-
-// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
-// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported)
-type osVersionInfoEx struct {
-	OSVersionInfoSize uint32
-	MajorVersion      uint32
-	MinorVersion      uint32
-	BuildNumber       uint32
-	PlatformID        uint32
-	CSDVersion        [128]uint16
-	ServicePackMajor  uint16
-	ServicePackMinor  uint16
-	SuiteMask         uint16
-	ProductType       byte
-	Reserve           byte
-}
+// VER_NT_WORKSTATION, see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
+const verNTWorkstation = 0x00000001 // VER_NT_WORKSTATION
 
 // IsWindowsClient returns true if the SKU is client. It returns false on
 // Windows server, or if an error occurred when making the GetVersionExW
 // syscall.
 func IsWindowsClient() bool {
-	osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
-	r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
-	if r1 == 0 {
-		logrus.WithError(err).Warn("GetVersionExW failed - assuming server SKU")
-		return false
-	}
-	// VER_NT_WORKSTATION, see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
-	const verNTWorkstation = 0x00000001 // VER_NT_WORKSTATION
-	return osviex.ProductType == verNTWorkstation
-}
-
-// HasWin32KSupport determines whether containers that depend on win32k can
-// run on this machine. Win32k is the driver used to implement windowing.
-func HasWin32KSupport() bool {
-	// For now, check for ntuser API support on the host. In the future, a host
-	// may support win32k in containers even if the host does not support ntuser
-	// APIs.
-	return ntuserApiset.Load() == nil
-}
+	ver := windows.RtlGetVersion()
+	return ver != nil && ver.ProductType == verNTWorkstation
+}
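Note: windows.RtlGetVersion (golang.org/x/sys/windows) returns an *OsVersionInfoEx directly and, unlike the GetVersionExW syscall, is not affected by application-compatibility shims that can misreport the OS version, which is why the hand-rolled struct and LazyDLL plumbing could be deleted. The new detection logic in isolation:

	// RtlGetVersion is documented to always succeed; the nil check is defensive.
	ver := windows.RtlGetVersion()
	isClient := ver != nil && ver.ProductType == verNTWorkstation // 0x00000001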
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package system // import "github.com/docker/docker/pkg/system"
@@ -1,3 +1,4 @@
+//go:build linux || freebsd
 // +build linux freebsd
 
 package system // import "github.com/docker/docker/pkg/system"
@@ -1,3 +1,4 @@
+//go:build !linux && !freebsd
 // +build !linux,!freebsd
 
 package system // import "github.com/docker/docker/pkg/system"
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package system // import "github.com/docker/docker/pkg/system"
@@ -26,10 +26,24 @@ type serviceConfig struct {
 	registrytypes.ServiceConfig
 }
 
+// TODO(thaJeztah) both the "index.docker.io" and "registry-1.docker.io" domains
+// are here for historic reasons and backward-compatibility. These domains
+// are still supported by Docker Hub (and will continue to be supported), but
+// there are new domains already in use, and plans to consolidate all legacy
+// domains to new "canonical" domains. Once those domains are decided on, we
+// should update these consts (but making sure to preserve compatibility with
+// existing installs, clients, and user configuration).
 const (
 	// DefaultNamespace is the default namespace
 	DefaultNamespace = "docker.io"
-	// IndexHostname is the index hostname
+	// DefaultRegistryHost is the hostname for the default (Docker Hub) registry
+	// used for pushing and pulling images. This hostname is hard-coded to handle
+	// the conversion from image references without registry name (e.g. "ubuntu",
+	// or "ubuntu:latest"), as well as references using the "docker.io" domain
+	// name, which is used as canonical reference for images on Docker Hub, but
+	// does not match the domain-name of Docker Hub's registry.
+	DefaultRegistryHost = "registry-1.docker.io"
+	// IndexHostname is the index hostname, used for authentication and image search.
 	IndexHostname = "index.docker.io"
 	// IndexServer is used for user auth and image search
 	IndexServer = "https://" + IndexHostname + "/v1/"
@@ -38,10 +52,10 @@
 )
 
 var (
-	// DefaultV2Registry is the URI of the default v2 registry
+	// DefaultV2Registry is the URI of the default (Docker Hub) registry.
 	DefaultV2Registry = &url.URL{
 		Scheme: "https",
-		Host:   "registry-1.docker.io",
+		Host:   DefaultRegistryHost,
 	}
 
 	// ErrInvalidRepositoryName is an error returned if the repository name did
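Note: the new DefaultRegistryHost constant makes the long-standing split explicit: references are canonicalized under the "docker.io" domain, while pushes and pulls actually talk to registry-1.docker.io. A sketch of the normalization step this hostname exists to support (assumes github.com/docker/distribution/reference):

	// "ubuntu" normalizes to docker.io/library/ubuntu ...
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(reference.Domain(named)) // "docker.io"
	// ... and the client then dials DefaultRegistryHost for that domain.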
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package registry // import "github.com/docker/docker/registry"
@@ -4,7 +4,7 @@ import (
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -34,7 +34,8 @@ func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders
 		return nil, err
 	}
 
-	if err := validateEndpoint(endpoint); err != nil {
+	err = validateEndpoint(endpoint)
+	if err != nil {
 		return nil, err
 	}
 
@@ -68,20 +69,6 @@ func validateEndpoint(endpoint *V1Endpoint) error {
 	return nil
 }
 
-func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint {
-	endpoint := &V1Endpoint{
-		IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify,
-		URL:      new(url.URL),
-	}
-
-	*endpoint.URL = address
-
-	// TODO(tiborvass): make sure a ConnectTimeout transport is used
-	tr := NewTransport(tlsConfig)
-	endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...))
-	return endpoint
-}
-
 // trimV1Address trims the version off the address and returns the
 // trimmed address or an error if there is a non-V1 version.
 func trimV1Address(address string) (string, error) {
@@ -90,10 +77,7 @@ func trimV1Address(address string) (string, error) {
 		apiVersionStr string
 	)
 
-	if strings.HasSuffix(address, "/") {
-		address = address[:len(address)-1]
-	}
-
+	address = strings.TrimSuffix(address, "/")
 	chunks = strings.Split(address, "/")
 	apiVersionStr = chunks[len(chunks)-1]
 	if apiVersionStr == "v1" {
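Note: strings.TrimSuffix is an exact drop-in for the removed three lines — it strips the suffix at most once and only when present. For illustration (the hostnames are made up):

	fmt.Println(strings.TrimSuffix("registry.example.com/v1/", "/")) // "registry.example.com/v1"
	fmt.Println(strings.TrimSuffix("registry.example.com/v1", "/"))  // unchanged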
@@ -124,9 +108,14 @@ func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent strin
 		return nil, err
 	}
 
-	endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders)
+	// TODO(tiborvass): make sure a ConnectTimeout transport is used
+	tr := NewTransport(tlsConfig)
 
-	return endpoint, nil
+	return &V1Endpoint{
+		IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify,
+		URL:      uri,
+		client:   HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)),
+	}, nil
 }
 
 // Get the formatted URL for the root of this registry Endpoint
@@ -142,29 +131,28 @@ func (e *V1Endpoint) Path(path string) string {
 
 // Ping returns a PingResult which indicates whether the registry is standalone or not.
 func (e *V1Endpoint) Ping() (PingResult, error) {
-	logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
-
 	if e.String() == IndexServer {
 		// Skip the check, we know this one is valid
 		// (and we never want to fallback to http in case of error)
-		return PingResult{Standalone: false}, nil
+		return PingResult{}, nil
 	}
 
+	logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
 	req, err := http.NewRequest(http.MethodGet, e.Path("_ping"), nil)
 	if err != nil {
-		return PingResult{Standalone: false}, err
+		return PingResult{}, err
 	}
 
 	resp, err := e.client.Do(req)
 	if err != nil {
-		return PingResult{Standalone: false}, err
+		return PingResult{}, err
 	}
 
 	defer resp.Body.Close()
 
-	jsonString, err := ioutil.ReadAll(resp.Body)
+	jsonString, err := io.ReadAll(resp.Body)
 	if err != nil {
-		return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
+		return PingResult{}, fmt.Errorf("error while reading the http response: %s", err)
 	}
 
 	// If the header is absent, we assume true for compatibility with earlier
@@ -177,13 +165,12 @@ func (e *V1Endpoint) Ping() (PingResult, error) {
 		// don't stop here. Just assume sane defaults
 	}
 	if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
-		logrus.Debugf("Registry version header: '%s'", hdr)
 		info.Version = hdr
 	}
 	logrus.Debugf("PingResult.Version: %q", info.Version)
 
 	standalone := resp.Header.Get("X-Docker-Registry-Standalone")
-	logrus.Debugf("Registry standalone header: '%s'", standalone)
+
 	// Accepted values are "true" (case-insensitive) and "1".
 	if strings.EqualFold(standalone, "true") || standalone == "1" {
 		info.Standalone = true
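Note: returning PingResult{} instead of PingResult{Standalone: false} is cosmetic — the zero value of a bool field is already false, so every return site yields an identical struct (assuming PingResult keeps only comparable fields):

	fmt.Println(PingResult{} == PingResult{Standalone: false}) // true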
@@ -3,9 +3,7 @@ package registry // import "github.com/docker/docker/registry"
 
 import (
 	"crypto/tls"
-	"errors"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"os"
@@ -18,12 +16,6 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-var (
-	// ErrAlreadyExists is an error returned if an image being pushed
-	// already exists on the remote side
-	ErrAlreadyExists = errors.New("Image already exists")
-)
-
 // HostCertsDir returns the config directory for a specific host
 func HostCertsDir(hostname string) (string, error) {
 	certsDir := CertsDir()
@@ -54,7 +46,7 @@ func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
 	return tlsConfig, nil
 }
 
-func hasFile(files []os.FileInfo, name string) bool {
+func hasFile(files []os.DirEntry, name string) bool {
 	for _, f := range files {
 		if f.Name() == name {
 			return true
@@ -67,7 +59,7 @@ func hasFile(files []os.FileInfo, name string) bool {
 // including roots and certificate pairs and updates the
 // provided TLS configuration.
 func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
-	fs, err := ioutil.ReadDir(directory)
+	fs, err := os.ReadDir(directory)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -82,7 +74,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
 			tlsConfig.RootCAs = systemPool
 		}
 		logrus.Debugf("crt: %s", filepath.Join(directory, f.Name()))
-		data, err := ioutil.ReadFile(filepath.Join(directory, f.Name()))
+		data, err := os.ReadFile(filepath.Join(directory, f.Name()))
 		if err != nil {
 			return err
 		}
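Note: os.ReadDir (Go 1.16) supersedes ioutil.ReadDir and returns []os.DirEntry rather than []os.FileInfo, deferring the per-file Stat call; hasFile only ever needs Name(), so the cheaper type suffices. A minimal sketch of the new pattern (the directory path is illustrative):

	entries, err := os.ReadDir("/etc/docker/certs.d/myregistry:5000")
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name()) // e.g. "ca.crt", "client.cert", "client.key"
	}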
@@ -183,7 +175,6 @@ func NewTransport(tlsConfig *tls.Config) *http.Transport {
 	direct := &net.Dialer{
 		Timeout:   30 * time.Second,
 		KeepAlive: 30 * time.Second,
-		DualStack: true,
 	}
 
 	base := &http.Transport{
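Note: DualStack has been deprecated since Go 1.12, when RFC 6555 "Happy Eyeballs" fallback became net.Dialer's default behaviour, so deleting the field changes nothing. The knob survives only for opting out:

	// A negative FallbackDelay disables dual-stack fallback entirely.
	d := &net.Dialer{FallbackDelay: -1}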
@@ -246,18 +246,12 @@ type APIEndpoint struct {
 	TLSConfig    *tls.Config
 }
 
-// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
-// Deprecated: this function is deprecated and will be removed in a future update
-func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint {
-	return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders)
-}
-
 // TLSConfig constructs a client TLS configuration based on server defaults
 func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	return newTLSConfig(hostname, isSecureIndex(s.config, hostname))
+	return s.tlsConfig(hostname)
 }
 
 // tlsConfig constructs a client TLS configuration based on server defaults
@@ -265,10 +259,6 @@ func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) {
 	return newTLSConfig(hostname, isSecureIndex(s.config, hostname))
 }
 
-func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) {
-	return s.tlsConfig(mirrorURL.Host)
-}
-
 // LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference.
 // It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP.
 func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
@@ -18,7 +18,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp
 		if err != nil {
 			return nil, err
 		}
-		mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL)
+		mirrorTLSConfig, err := s.tlsConfig(mirrorURL.Host)
 		if err != nil {
 			return nil, err
 		}
@@ -45,9 +45,8 @@ func (av APIVersion) String() string {
 
 // API Version identifiers.
 const (
-	_           = iota
-	APIVersion1 APIVersion = iota
-	APIVersion2
+	APIVersion1 APIVersion = 1
+	APIVersion2 APIVersion = 2
 )
 
 var apiVersions = map[APIVersion]string{
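Note: the explicit values match what the iota form already produced (the discarded first constant made APIVersion1 start at 1), so this is purely a readability change. The old arithmetic, spelled out with illustrative names:

	const (
		_  = iota // 0, discarded
		v1        // 1 — same value as APIVersion1
		v2        // 2 — same value as APIVersion2
	)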
@@ -0,0 +1,2 @@
+* -text
+*.bin -text -diff
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+/s2/cmd/_s2sx/sfx-exe
@@ -0,0 +1,141 @@
+# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com
+before:
+  hooks:
+    - ./gen.sh
+    - go install mvdan.cc/garble@latest
+
+builds:
+  -
+    id: "s2c"
+    binary: s2c
+    main: ./s2/cmd/s2c/main.go
+    flags:
+      - -trimpath
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - aix
+      - linux
+      - freebsd
+      - netbsd
+      - windows
+      - darwin
+    goarch:
+      - 386
+      - amd64
+      - arm
+      - arm64
+      - ppc64
+      - ppc64le
+      - mips64
+      - mips64le
+    goarm:
+      - 7
+    gobinary: garble
+  -
+    id: "s2d"
+    binary: s2d
+    main: ./s2/cmd/s2d/main.go
+    flags:
+      - -trimpath
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - aix
+      - linux
+      - freebsd
+      - netbsd
+      - windows
+      - darwin
+    goarch:
+      - 386
+      - amd64
+      - arm
+      - arm64
+      - ppc64
+      - ppc64le
+      - mips64
+      - mips64le
+    goarm:
+      - 7
+    gobinary: garble
+  -
+    id: "s2sx"
+    binary: s2sx
+    main: ./s2/cmd/_s2sx/main.go
+    flags:
+      - -modfile=s2sx.mod
+      - -trimpath
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - aix
+      - linux
+      - freebsd
+      - netbsd
+      - windows
+      - darwin
+    goarch:
+      - 386
+      - amd64
+      - arm
+      - arm64
+      - ppc64
+      - ppc64le
+      - mips64
+      - mips64le
+    goarm:
+      - 7
+    gobinary: garble
+
+archives:
+  -
+    id: s2-binaries
+    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
+    replacements:
+      aix: AIX
+      darwin: OSX
+      linux: Linux
+      windows: Windows
+      386: i386
+      amd64: x86_64
+      freebsd: FreeBSD
+      netbsd: NetBSD
+    format_overrides:
+      - goos: windows
+        format: zip
+    files:
+      - unpack/*
+      - s2/LICENSE
+      - s2/README.md
+checksum:
+  name_template: 'checksums.txt'
+snapshot:
+  name_template: "{{ .Tag }}-next"
+changelog:
+  sort: asc
+  filters:
+    exclude:
+    - '^doc:'
+    - '^docs:'
+    - '^test:'
+    - '^tests:'
+    - '^Update\sREADME.md'
+
+nfpms:
+  -
+    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    vendor: Klaus Post
+    homepage: https://github.com/klauspost/compress
+    maintainer: Klaus Post <klauspost@gmail.com>
+    description: S2 Compression Tool
+    license: BSD 3-Clause
+    formats:
+      - deb
+      - rpm
+    replacements:
+      darwin: Darwin
+      linux: Linux
+      freebsd: FreeBSD
+      amd64: x86_64
vendor/github.com/moby/sys/mountinfo/LICENSE → vendor/github.com/klauspost/compress/LICENSE (104 changes, generated, vendored)
@@ -1,3 +1,35 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+
+Files: gzhttp/*
 
 Apache License
 Version 2.0, January 2004
@@ -187,7 +219,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2016-2017 The New York Times Company
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -200,3 +232,73 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+------------------
+
+Files: s2/cmd/internal/readahead/*
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------------------
+Files: snappy/*
+Files: internal/snapref/*
+
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+  * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -0,0 +1,460 @@
|
||||||
|
# compress
|
||||||
|
|
||||||
|
This package provides various compression algorithms.
|
||||||
|
|
||||||
|
* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
|
||||||
|
* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
|
||||||
|
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
|
||||||
|
* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
|
||||||
|
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
||||||
|
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
||||||
|
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
||||||
|
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
|
||||||
|
|
||||||
|
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
||||||
|
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
||||||
|
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
|
||||||
|
|
||||||
|
# changelog
|
||||||
|
|
||||||
|
* Jan 25, 2022 (v1.14.2)
|
||||||
|
* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
|
||||||
|
* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
|
||||||
|
* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
|
||||||
|
* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
|
||||||
|
* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
|
||||||
|
* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
|
||||||
|
|
||||||
|
* Jan 11, 2022 (v1.14.1)
|
||||||
|
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
|
||||||
|
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
|
||||||
|
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
|
||||||
|
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
|
||||||
|
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
|
||||||
|
|
||||||
|
* Aug 30, 2021 (v1.13.5)
|
||||||
|
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
|
||||||
|
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
|
||||||
|
* zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
|
||||||
|
* Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
|
||||||
|
|
||||||
|
* Aug 12, 2021 (v1.13.4)
|
||||||
|
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
|
||||||
|
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
|
||||||
|
|
||||||
|
* Aug 3, 2021 (v1.13.3)
|
||||||
|
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
|
||||||
|
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
|
||||||
|
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
|
||||||
|
* s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
|
||||||
|
* zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
|
||||||
|
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
|
||||||
|
|
||||||
|
* Jun 14, 2021 (v1.13.1)
|
||||||
|
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
|
||||||
|
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
|
||||||
|
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
|
||||||
|
* s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395)
|
||||||
|
|
||||||
|
* Jun 3, 2021 (v1.13.0)
|
||||||
|
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
|
||||||
|
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
|
||||||
|
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
|
||||||
|
|
||||||
|
* May 25, 2021 (v1.12.3)
|
||||||
|
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
|
||||||
|
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
|
||||||
|
* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
|
||||||
|
|
||||||
|
* Apr 27, 2021 (v1.12.2)
|
||||||
|
* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
|
||||||
|
* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
|
||||||
|
* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
|
||||||
|
* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358)
|
||||||
|
* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
|
||||||
|
* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
|
||||||
|
|
||||||
|
* Apr 14, 2021 (v1.12.1)
|
||||||
|
* snappy package removed. Upstream added as dependency.
|
||||||
|
* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
|
||||||
|
* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
|
||||||
|
* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
|
||||||
|
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
|
||||||
|
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
|
||||||
|
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>See changes prior to v1.12.1</summary>
|
||||||
|
|
||||||
|
* Mar 26, 2021 (v1.11.13)
|
||||||
|
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
|
||||||
|
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
|
||||||
|
* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
|
||||||
|
* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
|
||||||
|
|
||||||
|
* Mar 5, 2021 (v1.11.12)
|
||||||
|
* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
|
||||||
|
* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
|
||||||
|
|
||||||
|
* Mar 1, 2021 (v1.11.9)
|
||||||
|
* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
|
||||||
|
* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
|
||||||
|
* s2: Fix binaries.
|
||||||
|
|
||||||
|
* Feb 25, 2021 (v1.11.8)
|
||||||
|
* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
|
||||||
|
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
|
||||||
|
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
|
||||||
|
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
|
||||||
|
* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
|
||||||
|
|
||||||
|
* Jan 14, 2021 (v1.11.7)
|
||||||
|
* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
|
||||||
|
* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
|
||||||
|
* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
|
||||||
|
* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
|
||||||
|
* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
|
||||||
|
|
||||||
|
* Jan 7, 2021 (v1.11.6)
|
||||||
|
* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
|
||||||
|
* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
|
||||||
|
|
||||||
|
* Dec 20, 2020 (v1.11.4)
|
||||||
|
* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
|
||||||
|
* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
|
||||||
|
* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
|
||||||
|
* Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
|
||||||
|
* zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
|
||||||
|
|
||||||
|
* Nov 15, 2020 (v1.11.3)
|
||||||
|
* inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
|
||||||
|
* zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
|
||||||
|
|
||||||
|
* Oct 11, 2020 (v1.11.2)
|
||||||
|
* s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
|
||||||
|
|
||||||
|
* Oct 1, 2020 (v1.11.1)
|
||||||
|
* zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
|
||||||
|
|
||||||
|
* Sept 8, 2020 (v1.11.0)
|
||||||
|
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
|
||||||
|
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
|
||||||
|
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>See changes prior to v1.11.0</summary>
|
||||||
|
|
||||||
|
* July 8, 2020 (v1.10.11)
|
||||||
|
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
|
||||||
|
* huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
|
||||||
|
|
||||||
|
* June 23, 2020 (v1.10.10)
|
||||||
|
* zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
|
||||||
|
|
||||||
|
* June 16, 2020 (v1.10.9):
|
||||||
|
* zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
|
||||||
|
* zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
|
||||||
|
* Fuzzit tests removed. The service has been purchased and is no longer available.
|
||||||
|
|
||||||
|
* June 5, 2020 (v1.10.8):
|
||||||
|
* 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
|
||||||
|
|
||||||
|
* June 1, 2020 (v1.10.7):
|
||||||
|
* Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
|
||||||
|
* Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
|
||||||
|
* Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
|
||||||
|
|
||||||
|
* May 21, 2020: (v1.10.6)
|
||||||
|
* zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
|
||||||
|
* zstd: Stricter decompression checks.
|
||||||
|
|
||||||
|
* April 12, 2020: (v1.10.5)
|
||||||
|
* s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
|
||||||
|
|
||||||
|
* Apr 8, 2020: (v1.10.4)
|
||||||
|
* zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
|
||||||
|
* Mar 11, 2020: (v1.10.3)
|
||||||
|
* s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
|
||||||
|
* s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
|
||||||
|
* zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
|
||||||
|
* zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
|
||||||
|
* zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
|
||||||
|
|
||||||
|
* Feb 27, 2020: (v1.10.2)
|
||||||
|
* Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
|
||||||
|
* Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
|
||||||
|
|
||||||
|
* Feb 18, 2020: (v1.10.1)
|
||||||
|
* Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
|
||||||
|
* deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
|
||||||
|
* Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
|
||||||
|
|
||||||
|
* Feb 4, 2020: (v1.10.0)
|
||||||
|
* Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
|
||||||
|
* Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
|
||||||
|
* Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
|
||||||
|
* Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
|
||||||
|
|
||||||
|
</details>

<details>
	<summary>See changes prior to v1.10.0</summary>

* Jan 20, 2020 (v1.9.8): Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
* Jan 11, 2020: S2 Encode/Decode will use the provided buffer if its capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
* Jan 4, 2020: (v1.9.6) Fixed a regression in v1.9.5 that caused corrupt zstd encodes in rare cases.
* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features.
* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd. [#187](https://github.com/klauspost/compress/pull/187)
* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
* Nov 28, 2019: (v1.9.3) Fewer allocations in stateless deflate.
* Nov 28, 2019: 5-20% faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
* Nov 12, 2019: (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
* Nov 11, 2019: Reduce inflate memory use by 1KB.
* Nov 10, 2019: Fewer allocations in deflate bit writer.
* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
* Oct 28, 2019: (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
* Oct 24, 2019: (v1.9.0) zstd: Fix rare data corruption. [#173](https://github.com/klauspost/compress/pull/173)
* Oct 24, 2019: zstd: Fix huff0 out-of-buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression. [#105](https://github.com/klauspost/compress/pull/105)

</details>

<details>
	<summary>See changes prior to v1.9.0</summary>

* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames), which adds a full zero payload block encoding option.
* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
* Aug 4, 2019: (v1.7.5) Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
* July 15, 2019: (v1.7.4) Fix double EOF block in rare cases on zstd encoder.
* July 15, 2019: (v1.7.3) Minor speedup/compression increase in default zstd encoder.
* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
* July 7, 2019: (v1.7.2) Snappy update, zstd decoder potential race fix.
* June 17, 2019: zstd decompression bugfix.
* June 17, 2019: Fix 32 bit builds.
* June 17, 2019: Easier use in modules (fewer dependencies).
* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
* June 5, 2019: 20-40% higher throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
* May 28, 2017: Reduce allocations when resetting decoder.
* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level.
* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
* Mar 24, 2016: Small speedup for level 1-3.
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
* Jan 16, 2016: Optimization on deflate level 1, 2, 3 compression.
* Jan 8, 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
* Dec 8, 2015: Make level 1 and -2 deterministic even if write size differs.
* Dec 8, 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
* Dec 8, 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
* Nov 23, 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
* Nov 20, 2015: Small optimization to bit writer on 64 bit systems.
* Nov 17, 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
* Nov 12, 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
* Nov 11, 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file.
* Oct 15, 2015: Added skipping on uncompressible data. Random data speed up >5x.

</details>

# deflate usage

* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)

The packages are drop-in replacements for the standard library packages. Simply replace the import path to use them:

| old import | new import | Documentation |
|--------------------|-----------------------------------------|--------------------|
| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |

* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).

You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.

The packages contain the same functionality as the standard library, so the standard documentation applies as well: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).

Currently there is only a minor speedup on decompression (mostly from CRC32 calculation).
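
For example, enabling the optimized gzip in an existing program is only an import change. A minimal sketch (the file name is hypothetical; `gzip.NewWriter` has the same signature as in the standard library):

```
package main

import (
	"io"
	"log"
	"os"

	gzip "github.com/klauspost/compress/gzip" // was: "compress/gzip"
)

func main() {
	f, err := os.Create("example.txt.gz") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := gzip.NewWriter(f) // same API as the standard library
	if _, err := io.WriteString(zw, "hello, gzip"); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```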

Memory usage is typically 1MB for a Writer. The stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers, consider using
the stateless compression described below.

# Stateless compression

This package offers stateless compression as a special option for gzip/deflate.
It will do compression but without maintaining any state between Write calls.

This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.

This is only relevant in cases where you expect to run many thousands of compressors concurrently,
but with very little activity. This is *not* intended for regular web servers serving individual requests.

Because of this, the size of actual Write calls will affect output size.

In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.

For direct deflate use, `NewStatelessWriter` and `StatelessDeflate` are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).

A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:

```
// replace 'ioutil.Discard' with your output.
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
if err != nil {
	return err
}
defer gzw.Close()

w := bufio.NewWriterSize(gzw, 4096)
defer w.Flush()

// Write to 'w'
```

This will only use up to 4KB in memory when the writer is idle.

Compression is almost always worse than the fastest compression level,
and each write will allocate (a little) memory.

# Performance Update 2018

It has been a while since we have looked at the speed of this package compared to the standard library, so I thought I would re-run my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.

The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.

The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.

The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).

## Overall differences

There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.

The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.

This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, while levels 5-9 show no significant gains - often spending a lot more time than can be justified by the achieved compression.

There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.

## Web Content

This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, and CSS.

Since level 1 and 9 are close to being the same code, they are quite close in results. But looking at the levels in between, the differences are quite big.

Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.

## Object files

This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.

The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.

The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 and 7 of this package respectively.

## Highly Compressible File

This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.

It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.

So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".

## Medium-High Compressible

This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump from Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.

We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.

## Medium Compressible

I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.

The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.

## Un-compressible Content

This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.

## Huffman only compression

This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.

This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).

Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.

The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).

The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and the encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup.

For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).

This is implemented on Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip.
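
As a minimal sketch of enabling the mode via this package's flate (writing to stdout is just for illustration; `flate.HuffmanOnly` mirrors the standard library constant):

```
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/flate"
)

func main() {
	// HuffmanOnly disables match searching entirely; only the
	// bits-per-character reduction described above is performed.
	fw, err := flate.NewWriter(os.Stdout, flate.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fw.Write([]byte("aaaaaabbbcc")); err != nil {
		log.Fatal(err)
	}
	if err := fw.Close(); err != nil {
		log.Fatal(err)
	}
}
```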

# Other packages

Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):

* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.

# license

This code is licensed under the same conditions as the original Go code. See LICENSE file.
@ -0,0 +1,85 @@
package compress

import "math"

// Estimate returns a normalized compressibility estimate of block b.
// Values close to zero are likely uncompressible.
// Values above 0.1 are likely to be compressible.
// Values above 0.5 are very compressible.
// Very small lengths will return 0.
func Estimate(b []byte) float64 {
	if len(b) < 16 {
		return 0
	}

	// Correctly predicted order 1
	hits := 0
	lastMatch := false
	var o1 [256]byte
	var hist [256]int
	c1 := byte(0)
	for _, c := range b {
		if c == o1[c1] {
			// We only count a hit if there were two correct predictions in a row.
			if lastMatch {
				hits++
			}
			lastMatch = true
		} else {
			lastMatch = false
		}
		o1[c1] = c
		c1 = c
		hist[c]++
	}

	// Use x^0.6 to give better spread
	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)

	// Calculate histogram distribution
	variance := float64(0)
	avg := float64(len(b)) / 256

	for _, v := range hist {
		Δ := float64(v) - avg
		variance += Δ * Δ
	}

	stddev := math.Sqrt(float64(variance)) / float64(len(b))
	exp := math.Sqrt(1 / float64(len(b)))

	// Subtract expected stddev
	stddev -= exp
	if stddev < 0 {
		stddev = 0
	}
	stddev *= 1 + exp

	// Use x^0.4 to give better spread
	entropy := math.Pow(stddev, 0.4)

	// 50/50 weight between prediction and histogram distribution
	return math.Pow((prediction+entropy)/2, 0.9)
}

// ShannonEntropyBits returns the minimum number of bits required to represent
// an entropy encoding of the input bytes.
// https://en.wiktionary.org/wiki/Shannon_entropy
func ShannonEntropyBits(b []byte) int {
	if len(b) == 0 {
		return 0
	}
	var hist [256]int
	for _, c := range b {
		hist[c]++
	}
	shannon := float64(0)
	invTotal := 1.0 / float64(len(b))
	for _, v := range hist[:] {
		if v > 0 {
			n := float64(v)
			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
		}
	}
	return int(math.Ceil(shannon))
}
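
// Hypothetical usage sketch (not part of the vendored file): how Estimate and
// ShannonEntropyBits might be combined to decide whether compressing a block
// is worthwhile. The 0.1 threshold follows the doc comment on Estimate; it is
// not a library constant.
//
//	data := bytes.Repeat([]byte("abcd"), 1024)
//	if compress.Estimate(data) > 0.1 {
//		// Likely compressible; worth running a real compressor.
//		fmt.Println("entropy-coding floor (bytes):", compress.ShannonEntropyBits(data)/8)
//	}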
@ -0,0 +1,79 @@
# Finite State Entropy

This package provides Finite State Entropy encoding and decoding.

Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
encoding provides fast, near-optimal symbol encoding/decoding
for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).

This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.

* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)

## News

* Feb 2018: First implementation released. Consider this beta software for now.

# Usage

This package provides a low level interface that allows compressing single independent blocks.

Each block is separate, and there are no built-in integrity checks.
This means that the caller should keep track of block sizes and also do checksums if needed.

Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
You must provide input and will receive the output and maybe an error.

These error values can be returned:

| Error | Description |
|---------------------|-----------------------------------------------------------------------------|
| `<nil>` | Everything ok, output is returned |
| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
| `(error)` | An internal error occurred. |

As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.

To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
object can be used for both.

Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.

Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
your input was likely corrupted.

It is important to note that a successful decoding does *not* mean your output matches your original input.
There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
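
A minimal round-trip sketch of the above (illustrative only: the sample input and error handling are assumptions, and in real use the block length and a checksum would be stored alongside the compressed data):

```
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/fse"
)

func main() {
	input := bytes.Repeat([]byte("abcabd"), 100)
	var s fse.Scratch

	comp, err := fse.Compress(input, &s)
	switch err {
	case fse.ErrIncompressible:
		log.Fatal("store the block uncompressed instead")
	case fse.ErrUseRLE:
		log.Fatal("store as (length, byte) instead")
	case nil:
		// ok; comp aliases s.Out and carries no length header,
		// so the caller must track len(input) itself.
	default:
		log.Fatal(err)
	}

	// Re-use the same Scratch, but clear Out first since comp still points at it.
	s.Out = nil
	got, err := fse.Decompress(comp, &s)
	if err != nil {
		log.Fatal(err) // input was likely corrupted
	}
	if !bytes.Equal(got, input) {
		log.Fatal("mismatch") // no built-in integrity check; verify yourself
	}
}
```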

For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).

# Performance

A lot of factors affect speed. Block sizes and the compressibility of the material are the primary ones.
All compression functions currently run only on the calling goroutine, so only one core will be used per block.

The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be
beneficial to transpose all your input values down by 64. A sketch of this follows below.
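
Such a transposition might look like the following (a hypothetical preprocessing step; the offset must not exceed the smallest byte value in the input, and the receiver must apply the inverse after decompression):

```
// transpose shifts all symbols down by a fixed offset before compression,
// lowering the highest symbol value the encoder must handle.
func transpose(in []byte, offset byte) []byte {
	out := make([]byte, len(in))
	for i, v := range in {
		out[i] = v - offset // add the offset back after decompression
	}
	return out
}
```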

With moderate block sizes around 64k, speed is typically 200MB/s per core for compression, and
decompression speed is around 300MB/s.

The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.

# Plans

At some point, more internals will be exposed to facilitate more "expert" usage of the components.

A streaming interface is also likely to be implemented, probably compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).

# Contributing

Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
changes will likely not be accepted. If in doubt, open an issue before writing the PR.
@ -0,0 +1,122 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.

package fse

import (
	"encoding/binary"
	"errors"
	"io"
)

// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
	in       []byte
	off      uint // next byte to read is at in[off - 1]
	value    uint64
	bitsRead uint8
}

// init initializes and resets the bit reader.
func (b *bitReader) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.bitsRead = 64
	b.value = 0
	if len(in) >= 8 {
		b.fillFastStart()
	} else {
		b.fill()
		b.fill()
	}
	b.bitsRead += 8 - uint8(highBits(uint32(v)))
	return nil
}

// getBits will return n bits. n can be 0.
func (b *bitReader) getBits(n uint8) uint16 {
	if n == 0 || b.bitsRead >= 64 {
		return 0
	}
	return b.getBitsFast(n)
}

// getBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) getBitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	b.bitsRead += n
	return v
}

// fillFast will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// 2 bounds checks.
	v := b.in[b.off-4:]
	v = v[:4]
	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	b.value = (b.value << 32) | uint64(low)
	b.bitsRead -= 32
	b.off -= 4
}

// fill will make sure at least 32 bits are available.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.off > 4 {
		v := b.in[b.off-4:]
		v = v[:4]
		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
		b.value = (b.value << 32) | uint64(low)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	for b.off > 0 {
		b.value = (b.value << 8) | uint64(b.in[b.off-1])
		b.bitsRead -= 8
		b.off--
	}
}

// fillFastStart assumes the bitreader is empty and that there are at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
	// Do a single re-slice to avoid bounds checks.
	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
	b.bitsRead = 0
	b.off -= 8
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
	return b.bitsRead >= 64 && b.off == 0
}

// close will close the bitstream and return an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
	// Release reference.
	b.in = nil
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
@ -0,0 +1,168 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.

package fse

import "fmt"

// bitWriter will write bits.
// The first bit will be the LSB of the first byte of output.
type bitWriter struct {
	bitContainer uint64
	nBits        uint8
	out          []byte
}

// bitMask16 contains bitmasks. It has extra entries to avoid bounds checks.
var bitMask16 = [32]uint16{
	0, 1, 3, 7, 0xF, 0x1F,
	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF} /* up to 16 bits */

// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
	b.nBits += bits
}

// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
	if bits == 0 {
		return
	}
	value <<= (16 - bits) & 15
	value >>= (16 - bits) & 15
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
	v := b.nBits >> 3
	switch v {
	case 0:
	case 1:
		b.out = append(b.out,
			byte(b.bitContainer),
		)
	case 2:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
		)
	case 3:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
		)
	case 4:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
		)
	case 5:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
		)
	case 6:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
		)
	case 7:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
		)
	case 8:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
			byte(b.bitContainer>>56),
		)
	default:
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
	}
	b.bitContainer >>= v << 3
	b.nBits &= 7
}

// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
		return
	}
	b.out = append(b.out,
		byte(b.bitContainer),
		byte(b.bitContainer>>8),
		byte(b.bitContainer>>16),
		byte(b.bitContainer>>24))
	b.nBits -= 32
	b.bitContainer >>= 32
}

// flushAlign will flush remaining full bytes and align to the next byte boundary.
func (b *bitWriter) flushAlign() {
	nbBytes := (b.nBits + 7) >> 3
	for i := uint8(0); i < nbBytes; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	b.nBits = 0
	b.bitContainer = 0
}

// close will write the alignment bit and write the final byte(s)
// to the output.
func (b *bitWriter) close() error {
	// End mark
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
	return nil
}

// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
	b.bitContainer = 0
	b.nBits = 0
	b.out = out
}
@ -0,0 +1,47 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.

package fse

// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
// The reader performs no bounds checks.
type byteReader struct {
	b   []byte
	off int
}

// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
	b.b = in
	b.off = 0
}

// advance the stream by n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}

// Uint32 returns a little endian uint32 starting at the current offset.
func (b byteReader) Uint32() uint32 {
	b2 := b.b[b.off:]
	b2 = b2[:4]
	v3 := uint32(b2[3])
	v2 := uint32(b2[2])
	v1 := uint32(b2[1])
	v0 := uint32(b2[0])
	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
}

// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}

// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
	return len(b.b) - b.off
}
@ -0,0 +1,683 @@
|
||||||
|
// Copyright 2018 Klaus Post. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
|
||||||
|
|
||||||
|
package fse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compress the input bytes. Input must be < 2GB.
|
||||||
|
// Provide a Scratch buffer to avoid memory allocations.
|
||||||
|
// Note that the output is also kept in the scratch buffer.
|
||||||
|
// If input is too hard to compress, ErrIncompressible is returned.
|
||||||
|
// If input is a single byte value repeated ErrUseRLE is returned.
|
||||||
|
func Compress(in []byte, s *Scratch) ([]byte, error) {
|
||||||
|
if len(in) <= 1 {
|
||||||
|
return nil, ErrIncompressible
|
||||||
|
}
|
||||||
|
if len(in) > (2<<30)-1 {
|
||||||
|
return nil, errors.New("input too big, must be < 2GB")
|
||||||
|
}
|
||||||
|
s, err := s.prepare(in)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create histogram, if none was provided.
|
||||||
|
maxCount := s.maxCount
|
||||||
|
if maxCount == 0 {
|
||||||
|
maxCount = s.countSimple(in)
|
||||||
|
}
|
||||||
|
// Reset for next run.
|
||||||
|
s.clearCount = true
|
||||||
|
s.maxCount = 0
|
||||||
|
if maxCount == len(in) {
|
||||||
|
// One symbol, use RLE
|
||||||
|
return nil, ErrUseRLE
|
||||||
|
}
|
||||||
|
if maxCount == 1 || maxCount < (len(in)>>7) {
|
||||||
|
// Each symbol present maximum once or too well distributed.
|
||||||
|
return nil, ErrIncompressible
|
||||||
|
}
|
||||||
|
s.optimalTableLog()
|
||||||
|
err = s.normalizeCount()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = s.writeCount()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if false {
|
||||||
|
err = s.validateNorm()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = s.buildCTable()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = s.compress(in)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
s.Out = s.bw.out
|
||||||
|
// Check if we compressed.
|
||||||
|
if len(s.Out) >= len(in) {
|
||||||
|
return nil, ErrIncompressible
|
||||||
|
}
|
||||||
|
return s.Out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cState contains the compression state of a stream.
|
||||||
|
type cState struct {
|
||||||
|
bw *bitWriter
|
||||||
|
stateTable []uint16
|
||||||
|
state uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// init will initialize the compression state to the first symbol of the stream.
|
||||||
|
func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
|
||||||
|
c.bw = bw
|
||||||
|
c.stateTable = ct.stateTable
|
||||||
|
|
||||||
|
nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
|
||||||
|
im := int32((nbBitsOut << 16) - first.deltaNbBits)
|
||||||
|
lu := (im >> nbBitsOut) + first.deltaFindState
|
||||||
|
c.state = c.stateTable[lu]
|
||||||
|
}
|
||||||
|
|
||||||
|
// encode the output symbol provided and write it to the bitstream.
|
||||||
|
func (c *cState) encode(symbolTT symbolTransform) {
|
||||||
|
nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
|
||||||
|
dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
|
||||||
|
c.bw.addBits16NC(c.state, uint8(nbBitsOut))
|
||||||
|
c.state = c.stateTable[dstState]
|
||||||
|
}
|
||||||
|
|
||||||
|
// encode the output symbol provided and write it to the bitstream.
|
||||||
|
func (c *cState) encodeZero(symbolTT symbolTransform) {
|
||||||
|
nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
|
||||||
|
dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
|
||||||
|
c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
|
||||||
|
c.state = c.stateTable[dstState]
|
||||||
|
}
|
||||||
|
|
||||||
|
// flush will write the tablelog to the output and flush the remaining full bytes.
|
||||||
|
func (c *cState) flush(tableLog uint8) {
|
||||||
|
c.bw.flush32()
|
||||||
|
c.bw.addBits16NC(c.state, tableLog)
|
||||||
|
c.bw.flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// compress is the main compression loop that will encode the input from the last byte to the first.
|
||||||
|
func (s *Scratch) compress(src []byte) error {
|
||||||
|
if len(src) <= 2 {
|
||||||
|
return errors.New("compress: src too small")
|
||||||
|
}
|
||||||
|
tt := s.ct.symbolTT[:256]
|
||||||
|
s.bw.reset(s.Out)
|
||||||
|
|
||||||
|
// Our two states each encodes every second byte.
|
||||||
|
// Last byte encoded (first byte decoded) will always be encoded by c1.
|
||||||
|
var c1, c2 cState
|
||||||
|
|
||||||
|
// Encode so remaining size is divisible by 4.
|
||||||
|
ip := len(src)
|
||||||
|
if ip&1 == 1 {
|
||||||
|
c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
|
||||||
|
c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
|
||||||
|
c1.encodeZero(tt[src[ip-3]])
|
||||||
|
ip -= 3
|
||||||
|
} else {
|
||||||
|
c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
|
||||||
|
c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
|
||||||
|
ip -= 2
|
||||||
|
}
|
||||||
|
if ip&2 != 0 {
|
||||||
|
c2.encodeZero(tt[src[ip-1]])
|
||||||
|
c1.encodeZero(tt[src[ip-2]])
|
||||||
|
ip -= 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Main compression loop.
|
||||||
|
switch {
|
||||||
|
case !s.zeroBits && s.actualTableLog <= 8:
|
||||||
|
// We can encode 4 symbols without requiring a flush.
|
||||||
|
// We do not need to check if any output is 0 bits.
|
||||||
|
for ip >= 4 {
|
||||||
|
s.bw.flush32()
|
||||||
|
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||||
|
c2.encode(tt[v0])
|
||||||
|
c1.encode(tt[v1])
|
||||||
|
c2.encode(tt[v2])
|
||||||
|
c1.encode(tt[v3])
|
||||||
|
ip -= 4
|
||||||
|
}
|
||||||
|
case !s.zeroBits:
|
||||||
|
// We do not need to check if any output is 0 bits.
|
||||||
|
for ip >= 4 {
|
||||||
|
s.bw.flush32()
|
||||||
|
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||||
|
c2.encode(tt[v0])
|
||||||
|
c1.encode(tt[v1])
|
||||||
|
s.bw.flush32()
|
||||||
|
c2.encode(tt[v2])
|
||||||
|
c1.encode(tt[v3])
|
||||||
|
ip -= 4
|
||||||
|
}
|
||||||
|
case s.actualTableLog <= 8:
|
||||||
|
// We can encode 4 symbols without requiring a flush
|
||||||
|
for ip >= 4 {
|
||||||
|
s.bw.flush32()
|
||||||
|
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||||
|
c2.encodeZero(tt[v0])
|
||||||
|
c1.encodeZero(tt[v1])
|
||||||
|
c2.encodeZero(tt[v2])
|
||||||
|
c1.encodeZero(tt[v3])
|
||||||
|
ip -= 4
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
for ip >= 4 {
|
||||||
|
s.bw.flush32()
|
||||||
|
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||||
|
c2.encodeZero(tt[v0])
|
||||||
|
c1.encodeZero(tt[v1])
|
||||||
|
s.bw.flush32()
|
||||||
|
c2.encodeZero(tt[v2])
|
||||||
|
c1.encodeZero(tt[v3])
|
||||||
|
ip -= 4
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush final state.
|
||||||
|
// Used to initialize state when decoding.
|
||||||
|
c2.flush(s.actualTableLog)
|
||||||
|
c1.flush(s.actualTableLog)
|
||||||
|
|
||||||
|
return s.bw.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeCount will write the normalized histogram count to header.
|
||||||
|
// This is read back by readNCount.
|
||||||
|
func (s *Scratch) writeCount() error {
|
||||||
|
var (
|
||||||
|
tableLog = s.actualTableLog
|
||||||
|
tableSize = 1 << tableLog
|
||||||
|
previous0 bool
|
||||||
|
charnum uint16
|
||||||
|
|
||||||
|
maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
|
||||||
|
|
||||||
|
// Write Table Size
|
||||||
|
bitStream = uint32(tableLog - minTablelog)
|
||||||
|
bitCount = uint(4)
|
||||||
|
remaining = int16(tableSize + 1) /* +1 for extra accuracy */
|
||||||
|
threshold = int16(tableSize)
|
||||||
|
nbBits = uint(tableLog + 1)
|
||||||
|
)
|
||||||
|
if cap(s.Out) < maxHeaderSize {
|
||||||
|
s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
|
||||||
|
}
|
||||||
|
outP := uint(0)
|
||||||
|
out := s.Out[:maxHeaderSize]
|
||||||
|
|
||||||
|
// stops at 1
|
||||||
|
for remaining > 1 {
|
||||||
|
if previous0 {
|
||||||
|
start := charnum
|
||||||
|
for s.norm[charnum] == 0 {
|
||||||
|
charnum++
|
||||||
|
}
|
||||||
|
for charnum >= start+24 {
|
||||||
|
start += 24
|
||||||
|
bitStream += uint32(0xFFFF) << bitCount
|
||||||
|
out[outP] = byte(bitStream)
|
||||||
|
out[outP+1] = byte(bitStream >> 8)
|
||||||
|
outP += 2
|
||||||
|
bitStream >>= 16
|
||||||
|
}
|
||||||
|
for charnum >= start+3 {
|
||||||
|
start += 3
|
||||||
|
bitStream += 3 << bitCount
|
||||||
|
bitCount += 2
|
||||||
|
}
|
||||||
|
bitStream += uint32(charnum-start) << bitCount
|
||||||
|
bitCount += 2
|
||||||
|
if bitCount > 16 {
|
||||||
|
out[outP] = byte(bitStream)
|
||||||
|
out[outP+1] = byte(bitStream >> 8)
|
||||||
|
outP += 2
|
||||||
|
bitStream >>= 16
|
||||||
|
bitCount -= 16
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
count := s.norm[charnum]
|
||||||
|
charnum++
|
||||||
|
max := (2*threshold - 1) - remaining
|
||||||
|
if count < 0 {
|
||||||
|
remaining += count
|
||||||
|
} else {
|
||||||
|
remaining -= count
|
||||||
|
}
|
||||||
|
count++ // +1 for extra accuracy
|
||||||
|
if count >= threshold {
|
||||||
|
count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
|
||||||
|
}
|
||||||
|
bitStream += uint32(count) << bitCount
|
||||||
|
bitCount += nbBits
|
||||||
|
if count < max {
|
||||||
|
bitCount--
|
||||||
|
}
|
||||||
|
|
||||||
|
previous0 = count == 1
|
||||||
|
if remaining < 1 {
|
||||||
|
return errors.New("internal error: remaining<1")
|
||||||
|
}
|
||||||
|
for remaining < threshold {
|
||||||
|
nbBits--
|
||||||
|
threshold >>= 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if bitCount > 16 {
|
||||||
|
out[outP] = byte(bitStream)
|
||||||
|
out[outP+1] = byte(bitStream >> 8)
|
||||||
|
outP += 2
|
||||||
|
bitStream >>= 16
|
||||||
|
bitCount -= 16
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out[outP] = byte(bitStream)
|
||||||
|
out[outP+1] = byte(bitStream >> 8)
|
||||||
|
outP += (bitCount + 7) / 8
|
||||||
|
|
||||||
|
if charnum > s.symbolLen {
|
||||||
|
return errors.New("internal error: charnum > s.symbolLen")
|
||||||
|
}
|
||||||
|
s.Out = out[:outP]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// symbolTransform contains the state transform for a symbol.
|
||||||
|
type symbolTransform struct {
|
||||||
|
deltaFindState int32
|
||||||
|
deltaNbBits uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// String prints values as a human readable string.
|
||||||
|
func (s symbolTransform) String() string {
|
||||||
|
return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cTable contains tables used for compression.
|
||||||
|
type cTable struct {
|
||||||
|
tableSymbol []byte
|
||||||
|
stateTable []uint16
|
||||||
|
symbolTT []symbolTransform
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocCtable will allocate tables needed for compression.
|
||||||
|
// If existing tables a re big enough, they are simply re-used.
|
||||||
|
func (s *Scratch) allocCtable() {
|
||||||
|
tableSize := 1 << s.actualTableLog
|
||||||
|
// get tableSymbol that is big enough.
|
||||||
|
if cap(s.ct.tableSymbol) < tableSize {
|
||||||
|
s.ct.tableSymbol = make([]byte, tableSize)
|
||||||
|
}
|
||||||
|
s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
|
||||||
|
|
||||||
|
ctSize := tableSize
|
||||||
|
if cap(s.ct.stateTable) < ctSize {
|
||||||
|
s.ct.stateTable = make([]uint16, ctSize)
|
||||||
|
}
|
||||||
|
s.ct.stateTable = s.ct.stateTable[:ctSize]
|
||||||
|
|
||||||
|
if cap(s.ct.symbolTT) < 256 {
|
||||||
|
s.ct.symbolTT = make([]symbolTransform, 256)
|
||||||
|
}
|
||||||
|
s.ct.symbolTT = s.ct.symbolTT[:256]
|
||||||
|
}
|
// buildCTable will populate the compression table so it is ready to be used.
func (s *Scratch) buildCTable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	var cumul [maxSymbolValue + 2]int16

	s.allocCtable()
	tableSymbol := s.ct.tableSymbol[:tableSize]
	// symbol start positions
	{
		cumul[0] = 0
		for ui, v := range s.norm[:s.symbolLen-1] {
			u := byte(ui) // one less than reference
			if v == -1 {
				// Low proba symbol
				cumul[u+1] = cumul[u] + 1
				tableSymbol[highThreshold] = u
				highThreshold--
			} else {
				cumul[u+1] = cumul[u] + v
			}
		}
		// Encode last symbol separately to avoid overflowing u
		u := int(s.symbolLen - 1)
		v := s.norm[s.symbolLen-1]
		if v == -1 {
			// Low proba symbol
			cumul[u+1] = cumul[u] + 1
			tableSymbol[highThreshold] = byte(u)
			highThreshold--
		} else {
			cumul[u+1] = cumul[u] + v
		}
		if uint32(cumul[s.symbolLen]) != tableSize {
			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
		}
		cumul[s.symbolLen] = int16(tableSize) + 1
	}
	// Spread symbols
	s.zeroBits = false
	{
		step := tableStep(tableSize)
		tableMask := tableSize - 1
		var position uint32
		// if any symbol > largeLimit, we may have 0 bits output.
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for ui, v := range s.norm[:s.symbolLen] {
			symbol := byte(ui)
			if v > largeLimit {
				s.zeroBits = true
			}
			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
				tableSymbol[position] = symbol
				position = (position + step) & tableMask
				for position > highThreshold {
					position = (position + step) & tableMask
				} /* Low proba area */
			}
		}

		// Check if we have gone through all positions
		if position != 0 {
			return errors.New("position!=0")
		}
	}

	// Build table
	table := s.ct.stateTable
	{
		tsi := int(tableSize)
		for u, v := range tableSymbol {
			// TableU16 : sorted by symbol order; gives next state value
			table[cumul[v]] = uint16(tsi + u)
			cumul[v]++
		}
	}

	// Build Symbol Transformation Table
	{
		total := int16(0)
		symbolTT := s.ct.symbolTT[:s.symbolLen]
		tableLog := s.actualTableLog
		tl := (uint32(tableLog) << 16) - (1 << tableLog)
		for i, v := range s.norm[:s.symbolLen] {
			switch v {
			case 0:
			case -1, 1:
				symbolTT[i].deltaNbBits = tl
				symbolTT[i].deltaFindState = int32(total - 1)
				total++
			default:
				maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
				minStatePlus := uint32(v) << maxBitsOut
				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
				symbolTT[i].deltaFindState = int32(total - v)
				total += v
			}
		}
		if total != int16(tableSize) {
			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
		}
	}
	return nil
}
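For orientation, here is how an encoder consumes the tables built above (a minimal sketch, not this package's actual encoder, which also writes the shifted-out bits to a bit writer): deltaNbBits is packed so that adding the current state and shifting right by 16 yields the bit count for this step, and deltaFindState offsets into stateTable to find the next state.

// Sketch of a single FSE encode step using the tables from buildCTable.
// Bit emission is elided; a real encoder writes the low nbBitsOut bits
// of state to the output stream before taking the new state.
func encodeStepSketch(ct *cTable, state uint16, symbol byte) (newState uint16, nbBitsOut uint32) {
	tt := ct.symbolTT[symbol]
	nbBitsOut = (uint32(state) + tt.deltaNbBits) >> 16
	newState = ct.stateTable[int32(state>>nbBitsOut)+tt.deltaFindState]
	return newState, nbBitsOut
}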
// countSimple will create a simple histogram in s.count.
// Returns the biggest count.
// Does not update s.clearCount.
func (s *Scratch) countSimple(in []byte) (max int) {
	for _, v := range in {
		s.count[v]++
	}
	m := uint32(0)
	for i, v := range s.count[:] {
		if v > m {
			m = v
		}
		if v > 0 {
			s.symbolLen = uint16(i) + 1
		}
	}
	return int(m)
}

// minTableLog provides the minimum logSize to safely represent a distribution.
func (s *Scratch) minTableLog() uint8 {
	minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1
	minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2
	if minBitsSrc < minBitsSymbols {
		return uint8(minBitsSrc)
	}
	return uint8(minBitsSymbols)
}
// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
func (s *Scratch) optimalTableLog() {
	tableLog := s.TableLog
	minBits := s.minTableLog()
	maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2
	if maxBitsSrc < tableLog {
		// Accuracy can be reduced
		tableLog = maxBitsSrc
	}
	if minBits > tableLog {
		tableLog = minBits
	}
	// Need a minimum to safely represent all symbol values
	if tableLog < minTablelog {
		tableLog = minTablelog
	}
	if tableLog > maxTableLog {
		tableLog = maxTableLog
	}
	s.actualTableLog = tableLog
}
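A worked example of the clamping above, as a standalone sketch with the constants inlined: for a 10 KiB block, maxBitsSrc = highBits(10239) - 2 = 11, so a requested tableLog of 13 is reduced to 11 before the final clamp to [minTablelog, maxTableLog].

// Standalone sketch of optimalTableLog's clamping, with the constants
// inlined (minTablelog = 5, maxTableLog = 12).
package main

import (
	"fmt"
	"math/bits"
)

func optimalLogSketch(requested uint8, srcLen, symbols int) uint8 {
	hb := func(v uint32) uint8 { return uint8(bits.Len32(v) - 1) }
	minBits := hb(uint32(symbols-1)) + 2 // mirrors minTableLog
	if b := hb(uint32(srcLen-1)) + 1; b < minBits {
		minBits = b
	}
	tl := requested
	if maxBitsSrc := hb(uint32(srcLen-1)) - 2; maxBitsSrc < tl {
		tl = maxBitsSrc // accuracy can be reduced for small inputs
	}
	if minBits > tl {
		tl = minBits
	}
	if tl < 5 {
		tl = 5
	}
	if tl > 12 {
		tl = 12
	}
	return tl
}

func main() {
	fmt.Println(optimalLogSketch(13, 10240, 200)) // 11: a 10 KiB block caps the log at 11
}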
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}

// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size.
func (s *Scratch) normalizeCount() error {
	var (
		tableLog          = s.actualTableLog
		scale             = 62 - uint64(tableLog)
		step              = (1 << 62) / uint64(s.br.remain())
		vStep             = uint64(1) << (scale - 20)
		stillToDistribute = int16(1 << tableLog)
		largest           int
		largestP          int16
		lowThreshold      = (uint32)(s.br.remain() >> tableLog)
	)

	for i, cnt := range s.count[:s.symbolLen] {
		// already handled
		// if (count[s] == s.length) return 0; /* rle special case */

		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			s.norm[i] = -1
			stillToDistribute--
		} else {
			proba := (int16)((uint64(cnt) * step) >> scale)
			if proba < 8 {
				restToBeat := vStep * uint64(rtbTable[proba])
				v := uint64(cnt)*step - (uint64(proba) << scale)
				if v > restToBeat {
					proba++
				}
			}
			if proba > largestP {
				largestP = proba
				largest = i
			}
			s.norm[i] = proba
			stillToDistribute -= proba
		}
	}

	if -stillToDistribute >= (s.norm[largest] >> 1) {
		// corner case, need another normalization method
		return s.normalizeCount2()
	}
	s.norm[largest] += stillToDistribute
	return nil
}
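The invariant both normalization passes preserve is that the absolute values in s.norm sum to exactly 1 << tableLog (a -1 entry still occupies one table cell). A toy sketch of the proportional-scaling idea, ignoring the rtbTable rounding refinement and the normalizeCount2 fallback above:

// Toy proportional normalization (assumes total > 0): scale counts so they
// sum to exactly 1<<tableLog, give every present symbol at least one cell,
// and hand the rounding remainder to the most probable symbol, as the real
// code above does.
func normalizeToy(counts []int, tableLog uint) []int {
	total := 0
	for _, c := range counts {
		total += c
	}
	target := 1 << tableLog
	norm := make([]int, len(counts))
	sum, largest := 0, 0
	for i, c := range counts {
		if c == 0 {
			continue
		}
		n := c * target / total
		if n == 0 {
			n = 1 // low-probability symbols still need one table cell
		}
		norm[i] = n
		sum += n
		if norm[i] > norm[largest] {
			largest = i
		}
	}
	norm[largest] += target - sum // absorb rounding error
	return norm
}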
// Secondary normalization method.
// To be used when primary method fails.
func (s *Scratch) normalizeCount2() error {
	const notYetAssigned = -2
	var (
		distributed  uint32
		total        = uint32(s.br.remain())
		tableLog     = s.actualTableLog
		lowThreshold = total >> tableLog
		lowOne       = (total * 3) >> (tableLog + 1)
	)
	for i, cnt := range s.count[:s.symbolLen] {
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			s.norm[i] = -1
			distributed++
			total -= cnt
			continue
		}
		if cnt <= lowOne {
			s.norm[i] = 1
			distributed++
			total -= cnt
			continue
		}
		s.norm[i] = notYetAssigned
	}
	toDistribute := (1 << tableLog) - distributed

	if (total / toDistribute) > lowOne {
		// risk of rounding to zero
		lowOne = (total * 3) / (toDistribute * 2)
		for i, cnt := range s.count[:s.symbolLen] {
			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
				s.norm[i] = 1
				distributed++
				total -= cnt
				continue
			}
		}
		toDistribute = (1 << tableLog) - distributed
	}
	if distributed == uint32(s.symbolLen)+1 {
		// all values are pretty poor;
		// probably incompressible data (should have already been detected);
		// find max, then give all remaining points to max
		var maxV int
		var maxC uint32
		for i, cnt := range s.count[:s.symbolLen] {
			if cnt > maxC {
				maxV = i
				maxC = cnt
			}
		}
		s.norm[maxV] += int16(toDistribute)
		return nil
	}

	if total == 0 {
		// all of the symbols were low enough for the lowOne or lowThreshold
		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
			if s.norm[i] > 0 {
				toDistribute--
				s.norm[i]++
			}
		}
		return nil
	}

	var (
		vStepLog = 62 - uint64(tableLog)
		mid      = uint64((1 << (vStepLog - 1)) - 1)
		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
		tmpTotal = mid
	)
	for i, cnt := range s.count[:s.symbolLen] {
		if s.norm[i] == notYetAssigned {
			var (
				end    = tmpTotal + uint64(cnt)*rStep
				sStart = uint32(tmpTotal >> vStepLog)
				sEnd   = uint32(end >> vStepLog)
				weight = sEnd - sStart
			)
			if weight < 1 {
				return errors.New("weight < 1")
			}
			s.norm[i] = int16(weight)
			tmpTotal = end
		}
	}
	return nil
}

// validateNorm validates the normalized histogram table.
func (s *Scratch) validateNorm() (err error) {
	var total int
	for _, v := range s.norm[:s.symbolLen] {
		if v >= 0 {
			total += int(v)
		} else {
			total -= int(v)
		}
	}
	defer func() {
		if err == nil {
			return
		}
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
		for i, v := range s.norm[:s.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
		}
	}()
	if total != (1 << s.actualTableLog) {
		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
	}
	for i, v := range s.count[s.symbolLen:] {
		if v != 0 {
			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
		}
	}
	return nil
}
@ -0,0 +1,374 @@
package fse

import (
	"errors"
	"fmt"
)

const (
	tablelogAbsoluteMax = 15
)
// Decompress a block of data.
// You can provide a scratch buffer to avoid allocations.
// If nil is provided a temporary one will be allocated.
// It is possible, but in no way guaranteed, that corrupt data will
// return an error.
// It is up to the caller to verify the integrity of the returned data.
// Use a predefined Scratch to set the maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
	s, err := s.prepare(b)
	if err != nil {
		return nil, err
	}
	s.Out = s.Out[:0]
	err = s.readNCount()
	if err != nil {
		return nil, err
	}
	err = s.buildDtable()
	if err != nil {
		return nil, err
	}
	err = s.decompress()
	if err != nil {
		return nil, err
	}

	return s.Out, nil
}
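A typical round trip through the public API (a standalone sketch; separate Scratch values are used because each one reuses its Out buffer, so compressing and decompressing through the same scratch would alias the buffers):

// Round-trip sketch using this package from the outside.
package main

import (
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	input := []byte("aaaaaabbbbccdd aaaaaabbbbccdd aaaaaabbbbccdd")

	var cs, ds fse.Scratch
	ds.DecompressLimit = 1 << 20 // refuse to inflate beyond 1 MiB

	comp, err := fse.Compress(input, &cs)
	if err != nil {
		// ErrIncompressible and ErrUseRLE signal FSE won't help this block.
		fmt.Println("compress:", err)
		return
	}
	out, err := fse.Decompress(comp, &ds)
	if err != nil {
		fmt.Println("decompress:", err)
		return
	}
	fmt.Println(string(out) == string(input)) // true
}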
// readNCount will read the symbol distribution so decoding tables can be constructed.
func (s *Scratch) readNCount() error {
	var (
		charnum   uint16
		previous0 bool
		b         = &s.br
	)
	iend := b.remain()
	if iend < 4 {
		return errors.New("input too small")
	}
	bitStream := b.Uint32()
	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
	if nbBits > tablelogAbsoluteMax {
		return errors.New("tableLog too large")
	}
	bitStream >>= 4
	bitCount := uint(4)

	s.actualTableLog = uint8(nbBits)
	remaining := int32((1 << nbBits) + 1)
	threshold := int32(1 << nbBits)
	gotTotal := int32(0)
	nbBits++

	for remaining > 1 {
		if previous0 {
			n0 := charnum
			for (bitStream & 0xFFFF) == 0xFFFF {
				n0 += 24
				if b.off < iend-5 {
					b.advance(2)
					bitStream = b.Uint32() >> bitCount
				} else {
					bitStream >>= 16
					bitCount += 16
				}
			}
			for (bitStream & 3) == 3 {
				n0 += 3
				bitStream >>= 2
				bitCount += 2
			}
			n0 += uint16(bitStream & 3)
			bitCount += 2
			if n0 > maxSymbolValue {
				return errors.New("maxSymbolValue too small")
			}
			for charnum < n0 {
				s.norm[charnum&0xff] = 0
				charnum++
			}

			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
				b.advance(bitCount >> 3)
				bitCount &= 7
				bitStream = b.Uint32() >> bitCount
			} else {
				bitStream >>= 2
			}
		}

		max := (2*(threshold) - 1) - (remaining)
		var count int32

		if (int32(bitStream) & (threshold - 1)) < max {
			count = int32(bitStream) & (threshold - 1)
			bitCount += nbBits - 1
		} else {
			count = int32(bitStream) & (2*threshold - 1)
			if count >= threshold {
				count -= max
			}
			bitCount += nbBits
		}

		count-- // extra accuracy
		if count < 0 {
			// -1 means +1
			remaining += count
			gotTotal -= count
		} else {
			remaining -= count
			gotTotal += count
		}
		s.norm[charnum&0xff] = int16(count)
		charnum++
		previous0 = count == 0
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}
		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
			b.advance(bitCount >> 3)
			bitCount &= 7
		} else {
			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
			b.off = len(b.b) - 4
		}
		bitStream = b.Uint32() >> (bitCount & 31)
	}
	s.symbolLen = charnum

	if s.symbolLen <= 1 {
		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
	}
	if s.symbolLen > maxSymbolValue+1 {
		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
	}
	if remaining != 1 {
		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
	}
	if bitCount > 32 {
		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
	}
	if gotTotal != 1<<s.actualTableLog {
		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
	}
	b.advance((bitCount + 7) >> 3)
	return nil
}
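Only the first four header bits carry the table log (biased by minTablelog); everything after them is the variable-precision count stream. A sketch of just that header read, assuming the little-endian Uint32 used by the byte reader above:

// Extract only the tableLog nibble from the start of a block (sketch;
// would live alongside the code above, reusing its errors import).
func tableLogFromHeader(b []byte) (uint, error) {
	if len(b) < 4 {
		return 0, errors.New("input too small")
	}
	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	tableLog := uint(v&0xF) + 5 // + minTablelog
	if tableLog > 15 {          // tablelogAbsoluteMax
		return 0, errors.New("tableLog too large")
	}
	return tableLog, nil
}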
// decSymbol contains information about a state entry,
// including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
type decSymbol struct {
	newState uint16
	symbol   uint8
	nbBits   uint8
}

// allocDtable will allocate decoding tables if they are not big enough.
func (s *Scratch) allocDtable() {
	tableSize := 1 << s.actualTableLog
	if cap(s.decTable) < tableSize {
		s.decTable = make([]decSymbol, tableSize)
	}
	s.decTable = s.decTable[:tableSize]

	if cap(s.ct.tableSymbol) < 256 {
		s.ct.tableSymbol = make([]byte, 256)
	}
	s.ct.tableSymbol = s.ct.tableSymbol[:256]

	if cap(s.ct.stateTable) < 256 {
		s.ct.stateTable = make([]uint16, 256)
	}
	s.ct.stateTable = s.ct.stateTable[:256]
}

// buildDtable will build the decoding table.
func (s *Scratch) buildDtable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	s.allocDtable()
	symbolNext := s.ct.stateTable[:256]

	// Init, lay down lowprob symbols
	s.zeroBits = false
	{
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for i, v := range s.norm[:s.symbolLen] {
			if v == -1 {
				s.decTable[highThreshold].symbol = uint8(i)
				highThreshold--
				symbolNext[i] = 1
			} else {
				if v >= largeLimit {
					s.zeroBits = true
				}
				symbolNext[i] = uint16(v)
			}
		}
	}
	// Spread symbols
	{
		tableMask := tableSize - 1
		step := tableStep(tableSize)
		position := uint32(0)
		for ss, v := range s.norm[:s.symbolLen] {
			for i := 0; i < int(v); i++ {
				s.decTable[position].symbol = uint8(ss)
				position = (position + step) & tableMask
				for position > highThreshold {
					// lowprob area
					position = (position + step) & tableMask
				}
			}
		}
		if position != 0 {
			// position must reach all cells once, otherwise normalizedCounter is incorrect
			return errors.New("corrupted input (position != 0)")
		}
	}

	// Build Decoding table
	{
		tableSize := uint16(1 << s.actualTableLog)
		for u, v := range s.decTable {
			symbol := v.symbol
			nextState := symbolNext[symbol]
			symbolNext[symbol] = nextState + 1
			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
			s.decTable[u].nbBits = nBits
			newState := (nextState << nBits) - tableSize
			if newState >= tableSize {
				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
			}
			if newState == uint16(u) && nBits == 0 {
				// Seems weird that this is possible with nbits > 0.
				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
			}
			s.decTable[u].newState = newState
		}
	}
	return nil
}
// decompress will decompress the bitstream.
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
	br := &s.bits
	br.init(s.br.unread())

	var s1, s2 decoder
	// Initialize and decode first state and symbol.
	s1.init(br, s.decTable, s.actualTableLog)
	s2.init(br, s.decTable, s.actualTableLog)

	// Use temp table to avoid bound checks/append penalty.
	var tmp = s.ct.tableSymbol[:256]
	var off uint8

	// Main part
	if !s.zeroBits {
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.nextFast()
			tmp[off+1] = s2.nextFast()
			br.fillFast()
			tmp[off+2] = s1.nextFast()
			tmp[off+3] = s2.nextFast()
			off += 4
			// When off is 0, we have overflowed and should write.
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	} else {
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.next()
			tmp[off+1] = s2.next()
			br.fillFast()
			tmp[off+2] = s1.next()
			tmp[off+3] = s2.next()
			off += 4
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				// When off is 0, we have overflowed and should write.
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	}
	s.Out = append(s.Out, tmp[:off]...)

	// Final bits, a bit more expensive check
	for {
		if s1.finished() {
			s.Out = append(s.Out, s1.final(), s2.final())
			break
		}
		br.fill()
		s.Out = append(s.Out, s1.next())
		if s2.finished() {
			s.Out = append(s.Out, s2.final(), s1.final())
			break
		}
		s.Out = append(s.Out, s2.next())
		if len(s.Out) >= s.DecompressLimit {
			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
		}
	}
	return br.close()
}
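The tmp buffer above relies on uint8 wraparound as its "buffer full" signal: off grows by 4 per loop, so it returns to 0 after exactly 64 iterations, which is precisely when the 256-byte tmp is full. A standalone illustration of the trick:

// uint8 wraparound as a flush signal: off += 4 wraps to 0 after 64
// iterations, exactly filling a 256-byte buffer.
func flushEvery256(emit func([]byte)) {
	var tmp [256]byte
	var off uint8
	for i := 0; i < 100; i++ { // pretend to decode 4 symbols per iteration
		tmp[off+0], tmp[off+1], tmp[off+2], tmp[off+3] = 'a', 'b', 'c', 'd'
		off += 4
		if off == 0 { // wrapped: tmp is full
			emit(tmp[:])
		}
	}
	emit(tmp[:off]) // flush the partial tail
}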
// decoder keeps track of the current state and updates it from the bitstream.
type decoder struct {
	state uint16
	br    *bitReader
	dt    []decSymbol
}

// init will initialize the decoder and read the first state from the stream.
func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
	d.dt = dt
	d.br = in
	d.state = in.getBits(tableLog)
}

// next returns the next symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (d *decoder) next() uint8 {
	n := &d.dt[d.state]
	lowBits := d.br.getBits(n.nbBits)
	d.state = n.newState + lowBits
	return n.symbol
}

// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (d *decoder) finished() bool {
	return d.br.finished() && d.dt[d.state].nbBits > 0
}

// final returns the current state symbol without decoding the next.
func (d *decoder) final() uint8 {
	return d.dt[d.state].symbol
}

// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (d *decoder) nextFast() uint8 {
	n := d.dt[d.state]
	lowBits := d.br.getBitsFast(n.nbBits)
	d.state = n.newState + lowBits
	return n.symbol
}
@ -0,0 +1,144 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.

// Package fse provides Finite State Entropy encoding and decoding.
//
// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding
// for byte blocks as implemented in zstd.
//
// See https://github.com/klauspost/compress/tree/master/fse for more information.
package fse

import (
	"errors"
	"fmt"
	"math/bits"
)

const (
	/*!MEMORY_USAGE :
	 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
	 *  Increasing memory usage improves compression ratio
	 *  Reduced memory usage can improve speed, due to cache effect
	 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
	maxMemoryUsage     = 14
	defaultMemoryUsage = 13

	maxTableLog     = maxMemoryUsage - 2
	maxTablesize    = 1 << maxTableLog
	defaultTablelog = defaultMemoryUsage - 2
	minTablelog     = 5
	maxSymbolValue  = 255
)
var (
	// ErrIncompressible is returned when input is judged to be too hard to compress.
	ErrIncompressible = errors.New("input is not compressible")

	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	ErrUseRLE = errors.New("input is single value repeated")
)

// Scratch provides temporary storage for compression and decompression.
type Scratch struct {
	// Private
	count    [maxSymbolValue + 1]uint32
	norm     [maxSymbolValue + 1]int16
	br       byteReader
	bits     bitReader
	bw       bitWriter
	ct       cTable      // Compression tables.
	decTable []decSymbol // Decompression table.
	maxCount int         // count of the most probable symbol

	// Per block parameters.
	// These can be used to override compression parameters of the block.
	// Do not touch, unless you know what you are doing.

	// Out is the output buffer.
	// If the scratch is re-used before the caller is done processing the output,
	// set this field to nil.
	// Otherwise the output buffer will be re-used for the next Compression/Decompression step
	// and allocation will be avoided.
	Out []byte

	// DecompressLimit limits the maximum decoded size acceptable.
	// If > 0 decompression will stop when approximately this many bytes
	// have been decoded.
	// If 0, maximum size will be 2GB.
	DecompressLimit int

	symbolLen      uint16 // Length of active part of the symbol table.
	actualTableLog uint8  // Selected tablelog.
	zeroBits       bool   // a symbol has prob > 50%, so zero-bit states exist.
	clearCount     bool   // clear count

	// MaxSymbolValue will override the maximum symbol value of the next block.
	MaxSymbolValue uint8

	// TableLog will attempt to override the tablelog for the next block.
	TableLog uint8
}
// Histogram returns the histogram buffer, which the caller can populate to
// skip that step during compression.
// It can also be used to inspect the histogram once compression is done.
// To indicate that you have populated the histogram, call HistogramFinished
// with the value of the highest populated symbol, as well as the number of entries
// in the most populated entry. These are accepted at face value.
// The returned slice always has length 256.
func (s *Scratch) Histogram() []uint32 {
	return s.count[:]
}

// HistogramFinished can be called to indicate that the histogram has been populated.
// maxSymbol is the index of the highest set symbol of the next data segment.
// maxCount is the number of entries in the most populated entry.
// These are accepted at face value.
func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
	s.maxCount = maxCount
	s.symbolLen = uint16(maxSymbol) + 1
	s.clearCount = maxCount != 0
}
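If symbol counts are already available, the histogram can be filled directly so the counting pass can be skipped; a sketch of that flow, assuming the compressor honors a pre-populated histogram as documented above:

// Pre-populate the histogram before compressing (standalone sketch).
package main

import "github.com/klauspost/compress/fse"

func compressWithHistogram(data []byte) ([]byte, error) {
	var s fse.Scratch
	hist := s.Histogram() // always length 256
	for _, b := range data {
		hist[b]++
	}
	var maxSym uint8
	var maxCnt int
	for i, c := range hist {
		if c > 0 {
			maxSym = uint8(i)
		}
		if int(c) > maxCnt {
			maxCnt = int(c)
		}
	}
	s.HistogramFinished(maxSym, maxCnt)
	// The counting pass inside Compress can now be skipped.
	return fse.Compress(data, &s)
}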
// prepare will prepare and allocate scratch tables used for both compression and decompression.
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
	if s == nil {
		s = &Scratch{}
	}
	if s.MaxSymbolValue == 0 {
		s.MaxSymbolValue = 255
	}
	if s.TableLog == 0 {
		s.TableLog = defaultTablelog
	}
	if s.TableLog > maxTableLog {
		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
	}
	if cap(s.Out) == 0 {
		s.Out = make([]byte, 0, len(in))
	}
	if s.clearCount && s.maxCount == 0 {
		for i := range s.count {
			s.count[i] = 0
		}
		s.clearCount = false
	}
	s.br.init(in)
	if s.DecompressLimit == 0 {
		// Max size 2GB.
		s.DecompressLimit = (2 << 30) - 1
	}

	return s, nil
}
// tableStep returns the next table index.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}

func highBits(val uint32) (n uint32) {
	return uint32(bits.Len32(val) - 1)
}
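tableStep returns an odd value for every table size used here (with tableLog >= minTablelog, both shifted terms are even and the +3 makes the sum odd), so it is coprime with the power-of-two table size, and the spreading loops in buildCTable/buildDtable visit every cell exactly once. A quick standalone check:

// Verify that stepping by tableStep visits every cell exactly once (sketch).
package main

import "fmt"

func main() {
	tableSize := uint32(1 << 11)
	step := (tableSize >> 1) + (tableSize >> 3) + 3 // odd, hence coprime with 2^n
	seen := make([]bool, tableSize)
	pos := uint32(0)
	for i := uint32(0); i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & (tableSize - 1)
	}
	for _, ok := range seen {
		if !ok {
			fmt.Println("missed a cell")
			return
		}
	}
	fmt.Println("full cycle, back at", pos) // full cycle, back at 0
}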
@ -0,0 +1,4 @@
#!/bin/sh

cd s2/cmd/_s2sx/ || exit 1
go generate .
@ -0,0 +1,3 @@
module github.com/klauspost/compress

go 1.15
@ -0,0 +1 @@
/huff0-fuzz.zip