vendor: github.com/klauspost/compress v1.17.11

full diff: https://github.com/klauspost/compress/compare/v1.17.9...v1.17.11

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn, 2024-11-01 13:28:47 +01:00
parent 97ff1b7c0a
commit e3942d46a0
GPG Key ID: 76698F39D527CE8C
15 changed files with 110 additions and 58 deletions


@@ -75,7 +75,7 @@ require (
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect


@@ -151,8 +151,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=


@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
 before:
   hooks:
     - ./gen.sh
@@ -99,7 +99,7 @@ archives:
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  version_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:


@@ -16,6 +16,27 @@ This package provides various compression algorithms.
 # changelog
 
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+	* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+	* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+	* s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+	* zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+	* flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+	* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+	* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+	* Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+	* zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+	* zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+	* zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+	* s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+	* s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
 * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
 	* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
 	* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@@ -81,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 	* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
 	* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
 	* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
-	* gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+	* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
 
 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
 	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -136,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
 	* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
 	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
-	* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+	* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
 	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
 	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
 	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -339,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when
 	* s2: Fix binaries.
 
 * Feb 25, 2021 (v1.11.8)
-	* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
 	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
 	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
 	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -518,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when
 * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
 * Feb 19, 2016: Handle small payloads faster in level 1-3.
 * Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
 * Feb 14, 2016: Snappy: Merge upstream changes.
 * Feb 14, 2016: Snappy: Fix aggressive skipping.
 * Feb 14, 2016: Snappy: Update benchmark.


@@ -15,7 +15,7 @@ const (
 // It is possible, but by no way guaranteed that corrupt data will
 // return an error.
 // It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
 func Decompress(b []byte, s *Scratch) ([]byte, error) {
 	s, err := s.prepare(b)
 	if err != nil {


@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
 			errs++
 		}
 		if errs > 0 {
-			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
 			continue
 		}
 		// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
 			errs++
 		}
 		if errs > 20 {
-			fmt.Fprintf(w, "%d errros, stopping\n", errs)
+			fmt.Fprintf(w, "%d errors, stopping\n", errs)
 			break
 		}
 	}


@@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 			printf("RLE set to 0x%x, code: %v", symb, v)
 		}
 	case compModeFSE:
-		println("Reading table for", tableIndex(i))
+		if debugDecoder {
+			println("Reading table for", tableIndex(i))
+		}
 		if seq.fse == nil || seq.fse.preDefined {
 			seq.fse = fseDecoderPool.Get().(*fseDecoder)
 		}


@@ -179,9 +179,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			seq.matchLen = uint32(length - zstdMinMatch)
 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -210,12 +210,12 @@ encodeLoop:
 			// Index match start+1 (long) -> s - 1
 			index0 := s + repOff
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
@@ -241,9 +241,9 @@ encodeLoop:
 		if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 			// Consider history as well.
 			var seq seq
-			lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+			seq.matchLen = uint32(length - zstdMinMatch)
 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -270,11 +270,11 @@ encodeLoop:
 			}
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff2
+			s += length + repOff2
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
@@ -708,9 +708,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			seq.matchLen = uint32(length - zstdMinMatch)
 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -738,12 +738,12 @@ encodeLoop:
 			blk.sequences = append(blk.sequences, seq)
 			// Index match start+1 (long) -> s - 1
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
@@ -772,9 +772,9 @@ encodeLoop:
 		if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 			// Consider history as well.
 			var seq seq
-			lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+			seq.matchLen = uint32(length - zstdMinMatch)
 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -801,11 +801,11 @@ encodeLoop:
 			}
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff2
+			s += length + repOff2
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop


@@ -138,9 +138,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			seq.matchLen = uint32(length - zstdMinMatch)
 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -166,11 +166,11 @@ encodeLoop:
 				println("repeat sequence", seq, "next s:", s)
 			}
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
@@ -798,9 +798,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			seq.matchLen = uint32(length - zstdMinMatch)
 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -826,11 +826,11 @@ encodeLoop:
 				println("repeat sequence", seq, "next s:", s)
 			}
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop


@@ -6,6 +6,7 @@ package zstd
 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
 // and write CRC if requested.
 func (e *Encoder) Write(p []byte) (n int, err error) {
 	s := &e.state
+	if s.eofWritten {
+		return 0, ErrEncoderClosed
+	}
 	for len(p) > 0 {
 		if len(p)+len(s.filling) < e.o.blockSize {
 			if e.o.crc {
@@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
 			return nil
 		}
 		if final && len(s.filling) > 0 {
-			s.current = e.EncodeAll(s.filling, s.current[:0])
+			s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
 			var n2 int
 			n2, s.err = s.w.Write(s.current)
 			if s.err != nil {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
 	s.wg.Add(1)
+	if final {
+		s.eofWritten = true
+	}
 	go func(src []byte) {
 		if debugEncoder {
 			println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
 		blk := enc.Block()
 		enc.Encode(blk, src)
 		blk.last = final
-		if final {
-			s.eofWritten = true
-		}
 		// Wait for pending writes.
 		s.wWg.Wait()
 		if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
 	if len(s.filling) > 0 {
 		err := e.nextBlock(false)
 		if err != nil {
+			// Ignore Flush after Close.
+			if errors.Is(s.err, ErrEncoderClosed) {
+				return nil
+			}
 			return err
 		}
 	}
 	s.wg.Wait()
 	s.wWg.Wait()
 	if s.err != nil {
+		// Ignore Flush after Close.
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return s.err
 	}
 	return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
 	}
 	err := e.nextBlock(true)
 	if err != nil {
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return err
 	}
 	if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
 		}
 		_, s.err = s.w.Write(frame)
 	}
+	if s.err == nil {
+		s.err = ErrEncoderClosed
+		return nil
+	}
 	return s.err
 }
@@ -469,6 +489,15 @@ func (e *Encoder) Close() error {
 // Data compressed with EncodeAll can be decoded with the Decoder,
 // using either a stream or DecodeAll.
 func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+	e.init.Do(e.initialize)
+	enc := <-e.encoders
+	defer func() {
+		e.encoders <- enc
+	}()
+	return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
 	if len(src) == 0 {
 		if e.o.fullZero {
 			// Add frame header.
@@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 		}
 		return dst
 	}
-	e.init.Do(e.initialize)
-	enc := <-e.encoders
-	defer func() {
-		// Release encoder reference to last block.
-		// If a non-single block is needed the encoder will reset again.
-		e.encoders <- enc
-	}()
 	// Use single segments when above minimum window and below window size.
 	single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
 	if e.o.single != nil {


@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
 		}
 		return err
 	}
-	printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+	if debugDecoder {
+		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+	}
 	windowLog := 10 + (wd >> 3)
 	windowBase := uint64(1) << windowLog
 	windowAdd := (windowBase / 8) * uint64(wd&0x7)


@@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	default:
-		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
 	}
 	s.seqSize += ctx.litRemain
@@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 			return io.ErrUnexpectedEOF
 		}
-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
 	}
 	if ctx.litRemain < 0 {


@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
 	MOVQ 40(SP), AX
 	ADDQ AX, 48(SP)
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)
 	// outBase += outPosition
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
 	MOVQ 40(SP), CX
 	ADDQ CX, 48(SP)
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)
 	// outBase += outPosition
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
 	MOVQ 40(SP), AX
 	ADDQ AX, 48(SP)
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)
 	// outBase += outPosition
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
 	MOVQ 40(SP), CX
 	ADDQ CX, 48(SP)
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)
 	// outBase += outPosition


@@ -88,6 +88,10 @@ var (
 	// Close has been called.
 	ErrDecoderClosed = errors.New("decoder used after Close")
 
+	// ErrEncoderClosed will be returned if the Encoder was used after
+	// Close has been called.
+	ErrEncoderClosed = errors.New("encoder used after Close")
+
 	// ErrDecoderNilInput is returned when a nil Reader was provided
 	// and an operation other than Reset/DecodeAll/Close was attempted.
 	ErrDecoderNilInput = errors.New("nil input provided as reader")
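Taken together with the Encoder hunks above, this new sentinel changes what a caller sees when a zstd Encoder is used after Close: Write now fails with ErrEncoderClosed, while Flush and a repeated Close become no-ops instead of surfacing an error. The following is a minimal, hypothetical caller-side sketch of that behavior against the vendored module as bumped here; it is not part of this diff.

package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf) // streaming encoder from the vendored package
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err) // first Close flushes the final block and finishes the frame
	}

	// After Close: Write is rejected with ErrEncoderClosed, while Flush and a
	// second Close return nil rather than an error.
	if _, err := enc.Write([]byte("more")); errors.Is(err, zstd.ErrEncoderClosed) {
		fmt.Println("write after close rejected:", err)
	}
	fmt.Println("flush after close:", enc.Flush())
	fmt.Println("second close:", enc.Close())
}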

vendor/modules.txt

@@ -172,8 +172,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2/utilities
 # github.com/inconshreveable/mousetrap v1.1.0
 ## explicit; go 1.18
 github.com/inconshreveable/mousetrap
-# github.com/klauspost/compress v1.17.9
-## explicit; go 1.20
+# github.com/klauspost/compress v1.17.11
+## explicit; go 1.21
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0