Diffstat (limited to 'vendor/github.com/pierrec/lz4/v4')
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/.gitignore                          |  34
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/.travis.yml                         |  19
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/LICENSE                             |  28
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/README.md                           |  90
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/go.mod                              |   3
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/go.sum                              |   3
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go          | 469
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go         |  88
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s    | 369
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s      | 201
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go     |   9
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go   | 100
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go        |  19
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go         | 331
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go         | 200
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go     | 103
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go         | 212
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go     |  11
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s      | 259
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go   |  10
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/lz4.go                              | 147
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/options.go                          | 213
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/options_gen.go                      |  92
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/reader.go                           | 243
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/state.go                            |  75
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/state_gen.go                        |  28
-rw-r--r--  vendor/github.com/pierrec/lz4/v4/writer.go                           | 233
27 files changed, 3589 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore
new file mode 100644
index 0000000000..5e98735047
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/.gitignore
@@ -0,0 +1,34 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/v4/.travis.yml b/vendor/github.com/pierrec/lz4/v4/.travis.yml
new file mode 100644
index 0000000000..4a9819e03a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+env:
+ - GO111MODULE=off
+
+go:
+ - 1.13.x
+ - 1.14.x
+
+matrix:
+ fast_finish: true
+
+sudo: false
+
+script:
+ - go test -v -cpu=2
+ - go test -v -cpu=2 -race
+ - go test -v -cpu=2 -tags noasm
+ - go test -v -cpu=2 -race -tags noasm
diff --git a/vendor/github.com/pierrec/lz4/v4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE
new file mode 100644
index 0000000000..bd899d8353
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md
new file mode 100644
index 0000000000..4ee388e81b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/README.md
@@ -0,0 +1,90 @@
+# lz4 : LZ4 compression in pure Go
+
+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low-level compress and uncompress functions for LZ4 data blocks.
+It is based on the reference C [implementation](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the Go toolchain installed:
+
+```
+go get github.com/pierrec/lz4
+```
+
+There is a command-line tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/cmd/lz4c
+```
+
+Usage
+
+```
+Usage of lz4c:
+ -version
+ print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+ -bc
+ enable block checksum
+ -l int
+ compression level (0=fastest)
+ -sc
+ disable stream checksum
+ -size string
+ block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+ // Compress the input string.
+ _, _ = io.Copy(zw, r)
+ _ = zw.Close() // Make sure the writer is closed
+ _ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
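+The low-level block API can also be used directly. Below is a minimal sketch (assuming the top-level `lz4` package exposes `CompressBlock`, `CompressBlockBound` and `UncompressBlock` with the signatures shown in `internal/lz4block`):
+
+```
+// Compress a single block, then decompress it.
+data := []byte("hello world hello world hello world hello world")
+
+buf := make([]byte, lz4.CompressBlockBound(len(data)))
+n, err := lz4.CompressBlock(data, buf)
+if err != nil {
+ // handle the error
+}
+if n == 0 {
+ // The data was not compressible; keep it uncompressed.
+}
+
+out := make([]byte, len(data))
+m, err := lz4.UncompressBlock(buf[:n], out)
+if err != nil {
+ // handle the error
+}
+_ = out[:m] // the original data
+```
+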
+## Contributing
+
+Contributions are very welcome, whether for bug fixes, performance improvements or anything else!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/v4/go.mod b/vendor/github.com/pierrec/lz4/v4/go.mod
new file mode 100644
index 0000000000..42229b2967
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/go.mod
@@ -0,0 +1,3 @@
+module github.com/pierrec/lz4/v4
+
+go 1.14
diff --git a/vendor/github.com/pierrec/lz4/v4/go.sum b/vendor/github.com/pierrec/lz4/v4/go.sum
new file mode 100644
index 0000000000..6973bd668a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/go.sum
@@ -0,0 +1,3 @@
+github.com/pierrec/lz4 v1.0.1 h1:w6GMGWSsCI04fTM8wQRdnW74MuJISakuUU0onU0TYB4=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
new file mode 100644
index 0000000000..f382649430
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
@@ -0,0 +1,469 @@
+package lz4block
+
+import (
+ "encoding/binary"
+ "math/bits"
+ "sync"
+
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+const (
+ // The following constants are used to setup the compression algorithm.
+ minMatch = 4 // the minimum size of the match sequence size (4 bytes)
+ winSizeLog = 16 // LZ4 64Kb window size limit
+ winSize = 1 << winSizeLog
+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+ // hashLog determines the size of the hash table used to quickly find a previous match position.
+ // Its value influences the compression speed and memory usage, the lower the faster,
+ // but at the expense of the compression ratio.
+ // 16 seems to be the best compromise for fast compression.
+ hashLog = 16
+ htSize = 1 << hashLog
+
+ mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
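+// recoverBlock turns a panic caused by reading past a short buffer into
+// ErrInvalidSourceShortBuffer, unless an error has already been set.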
+func recoverBlock(e *error) {
+ if r := recover(); r != nil && *e == nil {
+ *e = lz4errors.ErrInvalidSourceShortBuffer
+ }
+}
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+ const prime6bytes = 227718039650203
+ return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
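+// CompressBlockBound returns the maximum length of a compressed block, given
+// the length n of its uncompressed input (worst case of incompressible data
+// plus encoding overhead).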
+func CompressBlockBound(n int) int {
+ return n + n/255 + 16
+}
+
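+// UncompressBlock decompresses the LZ4 block in src into dst, which must be
+// large enough to hold the whole decompressed block, and returns the number
+// of bytes written to dst.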
+func UncompressBlock(src, dst []byte) (int, error) {
+ if len(src) == 0 {
+ return 0, nil
+ }
+ if di := decodeBlock(dst, src); di >= 0 {
+ return di, nil
+ }
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+}
+
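+// Compressor holds the table of recent match positions used by the fast
+// block compressor; it can be reused across calls to CompressBlock.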
+type Compressor struct {
+ // Offsets are at most 64kiB, so we can store only the lower 16 bits of
+ // match positions: effectively, an offset from some 64kiB block boundary.
+ //
+ // When we retrieve such an offset, we interpret it as relative to the last
+ // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
+ // depending on which of these is inside the current window. If a table
+ // entry was generated more than 64kiB back in the input, we find out by
+ // inspecting the input stream.
+ table [htSize]uint16
+
+ needsReset bool
+}
+
+// Get returns the position of a presumptive match for the hash h.
+// The match may be a false positive due to a hash collision or an old entry.
+// If si < winSize, the return value may be negative.
+func (c *Compressor) get(h uint32, si int) int {
+ h &= htSize - 1
+ i := int(c.table[h])
+ i += si &^ winMask
+ if i >= si {
+ // Try previous 64kiB block (negative when in first block).
+ i -= winSize
+ }
+ return i
+}
+
+func (c *Compressor) put(h uint32, si int) {
+ h &= htSize - 1
+ c.table[h] = uint16(si)
+}
+
+var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
+
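+// CompressBlock compresses src into dst using a Compressor taken from a
+// shared pool; see Compressor.CompressBlock for the meaning of the return values.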
+func CompressBlock(src, dst []byte) (int, error) {
+ c := compressorPool.Get().(*Compressor)
+ n, err := c.CompressBlock(src, dst)
+ compressorPool.Put(c)
+ return n, err
+}
+
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+ if c.needsReset {
+ // Zero out reused table to avoid non-deterministic output (issue #65).
+ c.table = [htSize]uint16{}
+ }
+ c.needsReset = true // Only false on first call.
+
+ // 0, nil (incompressible data) is only ever returned when the destination buffer is smaller than CompressBlockBound(len(src)).
+ isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+ // This significantly speeds up incompressible data and usually has very small impact on compression.
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+ const adaptSkipLog = 7
+
+ // si: Current position of the search.
+ // anchor: Position of the current literals.
+ var si, di, anchor int
+ sn := len(src) - mfLimit
+ if sn <= 0 {
+ goto lastLiterals
+ }
+
+ // Fast scan strategy: the hash table only stores the last 4-byte sequences.
+ for si < sn {
+ // Hash the next 6 bytes (sequence)...
+ match := binary.LittleEndian.Uint64(src[si:])
+ h := blockHash(match)
+ h2 := blockHash(match >> 8)
+
+ // We check for a match at si, si+1 and si+2 and pick the first one we get.
+ // Checking 3 only requires us to load the source once.
+ ref := c.get(h, si)
+ ref2 := c.get(h2, si)
+ c.put(h, si)
+ c.put(h2, si+1)
+
+ offset := si - ref
+
+ if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
+ // No match. Start calculating another hash.
+ // The processor can usually do this out-of-order.
+ h = blockHash(match >> 16)
+ ref3 := c.get(h, si+2)
+
+ // Check the second match at si+1
+ si += 1
+ offset = si - ref2
+
+ if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+ // No match. Check the third match at si+2
+ si += 1
+ offset = si - ref3
+ c.put(h, si)
+
+ if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
+ // Skip one extra byte (at si+3) before we check 3 matches again.
+ si += 2 + (si-anchor)>>adaptSkipLog
+ continue
+ }
+ }
+ }
+
+ // Match found.
+ lLen := si - anchor // Literal length.
+ // We already matched 4 bytes.
+ mLen := 4
+
+ // Extend backwards if we can, reducing literals.
+ tOff := si - offset - 1
+ for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+ si--
+ tOff--
+ lLen--
+ mLen++
+ }
+
+ // Add the match length, so we continue search at the end.
+ // Use mLen to store the offset base.
+ si, mLen = si+mLen, si+minMatch
+
+ // Find the longest match by looking by batches of 8 bytes.
+ for si+8 < sn {
+ x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+ if x == 0 {
+ si += 8
+ } else {
+ // Stop at the first non-zero byte.
+ si += bits.TrailingZeros64(x) >> 3
+ break
+ }
+ }
+
+ mLen = si - mLen
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // Encode literals length.
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ di++
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(l)
+ }
+ di++
+
+ // Literals.
+ if di+lLen > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+ di += lLen + 2
+ anchor = si
+
+ // Encode offset.
+ if di > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // Encode match length part 2.
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(mLen)
+ di++
+ }
+ // Check if we can load next values.
+ if si >= sn {
+ break
+ }
+ // Hash match end-2
+ h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+ c.put(h, si-2)
+ }
+
+lastLiterals:
+ if isNotCompressible && anchor == 0 {
+ // Incompressible.
+ return 0, nil
+ }
+
+ // Last literals.
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ di++
+ for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(lLen)
+ }
+ di++
+
+ // Write the last literals.
+ if isNotCompressible && di >= anchor {
+ // Incompressible.
+ return 0, nil
+ }
+ if di+len(src)-anchor > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+ return di, nil
+}
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+ const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+ return x * hasher >> (32 - winSizeLog)
+}
+
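+// CompressorHC holds the hash and chain tables used by the high compression
+// (HC) block compressor; like Compressor, it can be reused across blocks.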
+type CompressorHC struct {
+ // hashTable: stores the last position found for a given hash
+ // chainTable: stores previous positions for a given hash
+ hashTable, chainTable [htSize]int
+ needsReset bool
+}
+
+var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
+
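+// CompressBlockHC compresses src into dst at the given depth (compression
+// level), using a CompressorHC taken from a shared pool.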
+func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
+ c := compressorHCPool.Get().(*CompressorHC)
+ n, err := c.CompressBlock(src, dst, depth)
+ compressorHCPool.Put(c)
+ return n, err
+}
+
+func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
+ if c.needsReset {
+ // Zero out reused table to avoid non-deterministic output (issue #65).
+ c.hashTable = [htSize]int{}
+ c.chainTable = [htSize]int{}
+ }
+ c.needsReset = true // Only false on first call.
+
+ defer recoverBlock(&err)
+
+ // 0, nil (incompressible data) is only ever returned when the destination buffer is smaller than CompressBlockBound(len(src)).
+ isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+ // This significantly speeds up incompressible data and usually has very small impact on compression.
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+ const adaptSkipLog = 7
+
+ var si, di, anchor int
+ sn := len(src) - mfLimit
+ if sn <= 0 {
+ goto lastLiterals
+ }
+
+ if depth == 0 {
+ depth = winSize
+ }
+
+ for si < sn {
+ // Hash the next 4 bytes (sequence).
+ match := binary.LittleEndian.Uint32(src[si:])
+ h := blockHashHC(match)
+
+ // Follow the chain until out of window and give the longest match.
+ mLen := 0
+ offset := 0
+ for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
+ // The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+ // must match to improve on the match length.
+ if src[next+mLen] != src[si+mLen] {
+ continue
+ }
+ ml := 0
+ // Compare the current position with a previous with the same hash.
+ for ml < sn-si {
+ x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+ if x == 0 {
+ ml += 8
+ } else {
+ // Stop at the first non-zero byte.
+ ml += bits.TrailingZeros64(x) >> 3
+ break
+ }
+ }
+ if ml < minMatch || ml <= mLen {
+ // Match too small (<minMatch) or not longer than the current match.
+ continue
+ }
+ // Found a longer match, keep its position and length.
+ mLen = ml
+ offset = si - next
+ // Try another previous position with the same hash.
+ }
+ c.chainTable[si&winMask] = c.hashTable[h]
+ c.hashTable[h] = si
+
+ // No match found.
+ if mLen == 0 {
+ si += 1 + (si-anchor)>>adaptSkipLog
+ continue
+ }
+
+ // Match found.
+ // Update hash/chain tables with overlapping bytes:
+ // si already hashed, add everything from si+1 up to the match length.
+ winStart := si + 1
+ if ws := si + mLen - winSize; ws > winStart {
+ winStart = ws
+ }
+ for si, ml := winStart, si+mLen; si < ml; {
+ match >>= 8
+ match |= uint32(src[si+3]) << 24
+ h := blockHashHC(match)
+ c.chainTable[si&winMask] = c.hashTable[h]
+ c.hashTable[h] = si
+ si++
+ }
+
+ lLen := si - anchor
+ si += mLen
+ mLen -= minMatch // Match length does not include minMatch.
+
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // Encode literals length.
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ di++
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(l)
+ }
+ di++
+
+ // Literals.
+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+ di += lLen
+ anchor = si
+
+ // Encode offset.
+ di += 2
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // Encode match length part 2.
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(mLen)
+ di++
+ }
+ }
+
+ if isNotCompressible && anchor == 0 {
+ // Incompressible.
+ return 0, nil
+ }
+
+ // Last literals.
+lastLiterals:
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ di++
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(lLen)
+ }
+ di++
+
+ // Write the last literals.
+ if isNotCompressible && di >= anchor {
+ // Incompressible.
+ return 0, nil
+ }
+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+ return di, nil
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
new file mode 100644
index 0000000000..e6cf88d71c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
@@ -0,0 +1,88 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+ Block64Kb uint32 = 1 << (16 + iota*2)
+ Block256Kb
+ Block1Mb
+ Block4Mb
+ Block8Mb = 2 * Block4Mb
+ legacyBlockSize = Block8Mb + Block8Mb/255 + 16 // CompressBlockBound(Block8Mb)
+)
+
+var (
+ BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
+ BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
+ BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
+ BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
+ BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, legacyBlockSize) }}
+)
+
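+// Index returns the BlockSizeIndex corresponding to the block size b, or 0 if
+// b is not a supported size.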
+func Index(b uint32) BlockSizeIndex {
+ switch b {
+ case Block64Kb:
+ return 4
+ case Block256Kb:
+ return 5
+ case Block1Mb:
+ return 6
+ case Block4Mb:
+ return 7
+ case Block8Mb: // only valid in legacy mode
+ return 3
+ }
+ return 0
+}
+
+func IsValid(b uint32) bool {
+ return Index(b) > 0
+}
+
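+// BlockSizeIndex is the compact encoding of a maximum block size, as used in
+// frame descriptors: values 4 to 7 select 64Kb to 4Mb blocks, and 3 is
+// reserved for the legacy 8Mb block size.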
+type BlockSizeIndex uint8
+
+func (b BlockSizeIndex) IsValid() bool {
+ switch b {
+ case 4, 5, 6, 7:
+ return true
+ }
+ return false
+}
+
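+// Get returns a buffer of the size selected by b, taken from the corresponding pool.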
+func (b BlockSizeIndex) Get() []byte {
+ var buf interface{}
+ switch b {
+ case 4:
+ buf = BlockPool64K.Get()
+ case 5:
+ buf = BlockPool256K.Get()
+ case 6:
+ buf = BlockPool1M.Get()
+ case 7:
+ buf = BlockPool4M.Get()
+ case 3:
+ buf = BlockPool8M.Get()
+ }
+ return buf.([]byte)
+}
+
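+// Put returns buf to the pool matching its capacity, if any; buffers with an
+// unexpected capacity are simply dropped.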
+func Put(buf []byte) {
+ // Safeguard: do not allow invalid buffers.
+ switch c := cap(buf); uint32(c) {
+ case Block64Kb:
+ BlockPool64K.Put(buf[:c])
+ case Block256Kb:
+ BlockPool256K.Put(buf[:c])
+ case Block1Mb:
+ BlockPool1M.Put(buf[:c])
+ case Block4Mb:
+ BlockPool4M.Put(buf[:c])
+ case legacyBlockSize:
+ BlockPool8M.Put(buf[:c])
+ }
+}
+
+type CompressionLevel uint32
+
+const Fast CompressionLevel = 0
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
new file mode 100644
index 0000000000..be79faa3fe
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
@@ -0,0 +1,369 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX scratch
+// DX token
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// func decodeBlock(dst, src []byte) int
+// using 50 bytes of stack currently
+TEXT ·decodeBlock(SB), NOSPLIT, $64-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, R11
+ MOVQ dst_len+8(FP), R8
+ ADDQ DI, R8
+
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R9
+ CMPQ R9, $0
+ JE err_corrupt
+ ADDQ SI, R9
+
+ // shortcut ends
+ // short output end
+ MOVQ R8, R12
+ SUBQ $32, R12
+ // short input end
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+loop:
+ // for si < len(src)
+ CMPQ SI, R9
+ JGE end
+
+ // token := uint32(src[si])
+ MOVBQZX (SI), DX
+ INCQ SI
+
+ // lit_len = token >> 4
+ // if lit_len > 0
+ // CX = lit_len
+ MOVQ DX, CX
+ SHRQ $4, CX
+
+ // if lit_len != 0xF
+ CMPQ CX, $0xF
+ JEQ lit_len_loop_pre
+ CMPQ DI, R12
+ JGE lit_len_loop_pre
+ CMPQ SI, R13
+ JGE lit_len_loop_pre
+
+ // copy shortcut
+
+ // A two-stage shortcut for the most common case:
+ // 1) If the literal length is 0..14, and there is enough space,
+ // enter the shortcut and copy 16 bytes on behalf of the literals
+ // (in the fast mode, only 8 bytes can be safely copied this way).
+ // 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ // manner; but we ensure that there's enough space in the output for
+ // those 18 bytes earlier, upon entering the shortcut (in other words,
+ // there is a combined check for both stages).
+
+ // copy literal
+ MOVOU (SI), X0
+ MOVOU X0, (DI)
+ ADDQ CX, DI
+ ADDQ CX, SI
+
+ MOVQ DX, CX
+ ANDQ $0xF, CX
+
+ // The second stage: prepare for match copying, decode full info.
+ // If it doesn't work out, the info won't be wasted.
+ // offset := uint16(data[:2])
+ MOVWQZX (SI), DX
+ ADDQ $2, SI
+
+ MOVQ DI, AX
+ SUBQ DX, AX
+ CMPQ AX, DI
+ JGT err_short_buf
+
+ // if we can't do the second stage then jump straight to read the
+ // match length, we already have the offset.
+ CMPQ CX, $0xF
+ JEQ match_len_loop_pre
+ CMPQ DX, $8
+ JLT match_len_loop_pre
+ CMPQ AX, R11
+ JLT err_short_buf
+
+ // memcpy(op + 0, match + 0, 8);
+ MOVQ (AX), BX
+ MOVQ BX, (DI)
+ // memcpy(op + 8, match + 8, 8);
+ MOVQ 8(AX), BX
+ MOVQ BX, 8(DI)
+ // memcpy(op +16, match +16, 2);
+ MOVW 16(AX), BX
+ MOVW BX, 16(DI)
+
+ LEAQ 4(DI)(CX*1), DI // minmatch
+
+ // shortcut complete, load next token
+ JMP loop
+
+lit_len_loop_pre:
+ // if lit_len > 0
+ CMPQ CX, $0
+ JEQ offset
+ CMPQ CX, $0xF
+ JNE copy_literal
+
+lit_len_loop:
+ // for src[si] == 0xFF
+ CMPB (SI), $0xFF
+ JNE lit_len_finalise
+
+ // bounds check src[si+1]
+ LEAQ 1(SI), AX
+ CMPQ AX, R9
+ JGT err_short_buf
+
+ // lit_len += 0xFF
+ ADDQ $0xFF, CX
+ INCQ SI
+ JMP lit_len_loop
+
+lit_len_finalise:
+ // lit_len += int(src[si])
+ // si++
+ MOVBQZX (SI), AX
+ ADDQ AX, CX
+ INCQ SI
+
+copy_literal:
+ // bounds check src and dst
+ LEAQ (SI)(CX*1), AX
+ CMPQ AX, R9
+ JGT err_short_buf
+
+ LEAQ (DI)(CX*1), AX
+ CMPQ AX, R8
+ JGT err_short_buf
+
+ // what's a good cutoff to call memmove?
+ CMPQ CX, $16
+ JGT memmove_lit
+
+ // if len(dst[di:]) < 16
+ MOVQ R8, AX
+ SUBQ DI, AX
+ CMPQ AX, $16
+ JLT memmove_lit
+
+ // if len(src[si:]) < 16
+ MOVQ R9, AX
+ SUBQ SI, AX
+ CMPQ AX, $16
+ JLT memmove_lit
+
+ MOVOU (SI), X0
+ MOVOU X0, (DI)
+
+ JMP finish_lit_copy
+
+memmove_lit:
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ // spill
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP) // need len to inc SI, DI after
+ MOVB DX, 48(SP)
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVB 48(SP), DX
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+finish_lit_copy:
+ ADDQ CX, SI
+ ADDQ CX, DI
+
+ CMPQ SI, R9
+ JGE end
+
+offset:
+ // CX := mLen
+ // free up DX to use for offset
+ MOVQ DX, CX
+
+ LEAQ 2(SI), AX
+ CMPQ AX, R9
+ JGT err_short_buf
+
+ // offset
+ // DX := int(src[si]) | int(src[si+1])<<8
+ MOVWQZX (SI), DX
+ ADDQ $2, SI
+
+ // 0 offset is invalid
+ CMPQ DX, $0
+ JEQ err_corrupt
+
+ ANDB $0xF, CX
+
+match_len_loop_pre:
+ // if mlen != 0xF
+ CMPB CX, $0xF
+ JNE copy_match
+
+match_len_loop:
+ // for src[si] == 0xFF
+ // match_len += 0xFF
+ CMPB (SI), $0xFF
+ JNE match_len_finalise
+
+ // bounds check src[si+1]
+ LEAQ 1(SI), AX
+ CMPQ AX, R9
+ JGT err_short_buf
+
+ ADDQ $0xFF, CX
+ INCQ SI
+ JMP match_len_loop
+
+match_len_finalise:
+ // match_len += int(src[si])
+ // si++
+ MOVBQZX (SI), AX
+ ADDQ AX, CX
+ INCQ SI
+
+copy_match:
+ // mLen += minMatch
+ ADDQ $4, CX
+
+ // check we have match_len bytes left in dst
+ // di+match_len < len(dst)
+ LEAQ (DI)(CX*1), AX
+ CMPQ AX, R8
+ JGT err_short_buf
+
+ // DX = offset
+ // CX = match_len
+ // BX = &dst + (di - offset)
+ MOVQ DI, BX
+ SUBQ DX, BX
+
+ // check BX is within dst
+ // if BX < &dst
+ CMPQ BX, R11
+ JLT err_short_buf
+
+ // if offset + match_len < di
+ LEAQ (BX)(CX*1), AX
+ CMPQ DI, AX
+ JGT copy_interior_match
+
+ // AX := len(dst[:di])
+ // MOVQ DI, AX
+ // SUBQ R11, AX
+
+ // copy 16 bytes at a time
+ // if di-offset < 16 copy 16-(di-offset) bytes to di
+ // then do the remaining
+
+copy_match_loop:
+ // for match_len >= 0
+ // dst[di] = dst[i]
+ // di++
+ // i++
+ MOVB (BX), AX
+ MOVB AX, (DI)
+ INCQ DI
+ INCQ BX
+ DECQ CX
+
+ CMPQ CX, $0
+ JGT copy_match_loop
+
+ JMP loop
+
+copy_interior_match:
+ CMPQ CX, $16
+ JGT memmove_match
+
+ // if len(dst[di:]) < 16
+ MOVQ R8, AX
+ SUBQ DI, AX
+ CMPQ AX, $16
+ JLT memmove_match
+
+ MOVOU (BX), X0
+ MOVOU X0, (DI)
+
+ ADDQ CX, DI
+ JMP loop
+
+memmove_match:
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ BX, 8(SP)
+ MOVQ CX, 16(SP)
+ // spill
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP) // need len to inc SI, DI after
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11 // TODO: make these sensible numbers
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+ ADDQ CX, DI
+ JMP loop
+
+err_corrupt:
+ MOVQ $-1, ret+48(FP)
+ RET
+
+err_short_buf:
+ MOVQ $-2, ret+48(FP)
+ RET
+
+end:
+ SUBQ R11, DI
+ MOVQ DI, ret+48(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
new file mode 100644
index 0000000000..ec94b7b3c3
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
@@ -0,0 +1,201 @@
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// Register allocation.
+#define dst R0
+#define dstorig R1
+#define src R2
+#define dstend R3
+#define srcend R4
+#define match R5 // Match address.
+#define token R6
+#define len R7 // Literal and match lengths.
+#define offset R6 // Match offset; overlaps with token.
+#define tmp1 R8
+#define tmp2 R9
+#define tmp3 R12
+
+#define minMatch $4
+
+// func decodeBlock(dst, src []byte) int
+TEXT ·decodeBlock(SB), NOFRAME|NOSPLIT, $-4-28
+ MOVW dst_base +0(FP), dst
+ MOVW dst_len +4(FP), dstend
+ MOVW src_base+12(FP), src
+ MOVW src_len +16(FP), srcend
+
+ CMP $0, srcend
+ BEQ shortSrc
+
+ ADD dst, dstend
+ ADD src, srcend
+
+ MOVW dst, dstorig
+
+loop:
+ // Read token. Extract literal length.
+ MOVBU.P 1(src), token
+ MOVW token >> 4, len
+ CMP $15, len
+ BNE readLitlenDone
+
+readLitlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADD tmp1, len
+ CMP $255, tmp1
+ BEQ readLitlenLoop
+
+readLitlenDone:
+ CMP $0, len
+ BEQ copyLiteralDone
+
+ // Bounds check dst+len and src+len.
+ ADD dst, len, tmp1
+ CMP dstend, tmp1
+ //BHI shortDst // Uncomment for distinct error codes.
+ ADD src, len, tmp2
+ CMP.LS srcend, tmp2
+ BHI shortSrc
+
+ // Copy literal.
+ CMP $4, len
+ BLO copyLiteralFinish
+
+ // Copy 0-3 bytes until src is aligned.
+ TST $1, src
+ MOVBU.NE.P 1(src), tmp1
+ MOVB.NE.P tmp1, 1(dst)
+ SUB.NE $1, len
+
+ TST $2, src
+ MOVHU.NE.P 2(src), tmp2
+ MOVB.NE.P tmp2, 1(dst)
+ MOVW.NE tmp2 >> 8, tmp1
+ MOVB.NE.P tmp1, 1(dst)
+ SUB.NE $2, len
+
+ B copyLiteralLoopCond
+
+copyLiteralLoop:
+ // Aligned load, unaligned write.
+ MOVW.P 4(src), tmp1
+ MOVW tmp1 >> 8, tmp2
+ MOVB tmp2, 1(dst)
+ MOVW tmp1 >> 16, tmp3
+ MOVB tmp3, 2(dst)
+ MOVW tmp1 >> 24, tmp2
+ MOVB tmp2, 3(dst)
+ MOVB.P tmp1, 4(dst)
+copyLiteralLoopCond:
+ // Loop until len-4 < 0.
+ SUB.S $4, len
+ BPL copyLiteralLoop
+
+ // Restore len, which is now negative.
+ ADD $4, len
+
+copyLiteralFinish:
+ // Copy remaining 0-3 bytes.
+ TST $2, len
+ MOVHU.NE.P 2(src), tmp2
+ MOVB.NE.P tmp2, 1(dst)
+ MOVW.NE tmp2 >> 8, tmp1
+ MOVB.NE.P tmp1, 1(dst)
+ TST $1, len
+ MOVBU.NE.P 1(src), tmp1
+ MOVB.NE.P tmp1, 1(dst)
+
+copyLiteralDone:
+ CMP src, srcend
+ BEQ end
+
+ // Initial part of match length.
+ // This frees up the token register for reuse as offset.
+ AND $15, token, len
+
+ // Read offset.
+ ADD $2, src
+ CMP srcend, src
+ BHI shortSrc
+ MOVBU -2(src), offset
+ MOVBU -1(src), tmp1
+ ORR tmp1 << 8, offset
+ CMP $0, offset
+ BEQ corrupt
+
+ // Read rest of match length.
+ CMP $15, len
+ BNE readMatchlenDone
+
+readMatchlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADD tmp1, len
+ CMP $255, tmp1
+ BEQ readMatchlenLoop
+
+readMatchlenDone:
+ ADD minMatch, len
+
+ // Bounds check dst+len and match = dst-offset.
+ ADD dst, len, tmp1
+ CMP dstend, tmp1
+ //BHI shortDst // Uncomment for distinct error codes.
+ SUB offset, dst, match
+ CMP.LS match, dstorig
+ BHI corrupt
+
+ // If the offset is at least four (len is, because of minMatch),
+ // do a four-way unrolled byte copy loop. Using MOVD instead of four
+ // byte loads is much faster, but to remain portable we'd have to
+ // align match first, which in turn is too expensive.
+ CMP $4, offset
+ BLO copyMatch
+
+ SUB $4, len
+copyMatch4:
+ MOVBU.P 4(match), tmp1
+ MOVB.P tmp1, 4(dst)
+ MOVBU -3(match), tmp2
+ MOVB tmp2, -3(dst)
+ MOVBU -2(match), tmp3
+ MOVB tmp3, -2(dst)
+ MOVBU -1(match), tmp1
+ MOVB tmp1, -1(dst)
+ SUB.S $4, len
+ BPL copyMatch4
+
+ // Restore len, which is now negative.
+ ADD.S $4, len
+ BEQ copyMatchDone
+
+copyMatch:
+ // Simple byte-at-a-time copy.
+ SUB.S $1, len
+ MOVBU.P 1(match), tmp2
+ MOVB.P tmp2, 1(dst)
+ BNE copyMatch
+
+copyMatchDone:
+ CMP src, srcend
+ BNE loop
+
+end:
+ SUB dstorig, dst, tmp1
+ MOVW tmp1, ret+24(FP)
+ RET
+
+ // The three error cases have distinct labels so we can put different
+ // return codes here when debugging, or if the error returns need to
+ // be changed.
+shortDst:
+shortSrc:
+corrupt:
+ MOVW $-1, tmp1
+ MOVW tmp1, ret+24(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
new file mode 100644
index 0000000000..e26f8cd613
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
@@ -0,0 +1,9 @@
+// +build amd64 arm
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4block
+
+//go:noescape
+func decodeBlock(dst, src []byte) int
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
new file mode 100644
index 0000000000..9065653a9f
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
@@ -0,0 +1,100 @@
+// +build !amd64,!arm appengine !gc noasm
+
+package lz4block
+
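+// decodeBlock decodes a single LZ4 block from src into dst and returns the
+// number of bytes written to dst, or a negative value if the input is malformed
+// or a buffer is too short. This is the pure Go fallback used when no assembly
+// implementation is available.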
+func decodeBlock(dst, src []byte) (ret int) {
+ const hasError = -2
+ defer func() {
+ if recover() != nil {
+ ret = hasError
+ }
+ }()
+
+ var si, di uint
+ for {
+ // Literals and match lengths (token).
+ b := uint(src[si])
+ si++
+
+ // Literals.
+ if lLen := b >> 4; lLen > 0 {
+ switch {
+ case lLen < 0xF && si+16 < uint(len(src)):
+ // Shortcut 1
+ // if we have enough room in src and dst, and the literals length
+ // is small enough (0..14) then copy all 16 bytes, even if not all
+ // are part of the literals.
+ copy(dst[di:], src[si:si+16])
+ si += lLen
+ di += lLen
+ if mLen := b & 0xF; mLen < 0xF {
+ // Shortcut 2
+ // if the match length (4..18) does not exceed the offset, then copy
+ // all 18 bytes, even if not all are part of the match.
+ mLen += 4
+ if offset := uint(src[si]) | uint(src[si+1])<<8; mLen <= offset {
+ i := di - offset
+ end := i + 18
+ if end > uint(len(dst)) {
+ // The remaining buffer may not hold 18 bytes.
+ // See https://github.com/pierrec/lz4/issues/51.
+ end = uint(len(dst))
+ }
+ copy(dst[di:], dst[i:end])
+ si += 2
+ di += mLen
+ continue
+ }
+ }
+ case lLen == 0xF:
+ for src[si] == 0xFF {
+ lLen += 0xFF
+ si++
+ }
+ lLen += uint(src[si])
+ si++
+ fallthrough
+ default:
+ copy(dst[di:di+lLen], src[si:si+lLen])
+ si += lLen
+ di += lLen
+ }
+ }
+ if si == uint(len(src)) {
+ return int(di)
+ } else if si > uint(len(src)) {
+ return hasError
+ }
+
+ offset := uint(src[si]) | uint(src[si+1])<<8
+ if offset == 0 {
+ return hasError
+ }
+ si += 2
+
+ // Match.
+ mLen := b & 0xF
+ if mLen == 0xF {
+ for src[si] == 0xFF {
+ mLen += 0xFF
+ si++
+ }
+ mLen += uint(src[si])
+ si++
+ }
+ mLen += minMatch
+
+ // Copy the match.
+ expanded := dst[di-offset:]
+ if mLen > offset {
+ // Efficiently copy the match dst[di-offset:di] into the dst slice.
+ bytesToCopy := offset * (mLen / offset)
+ for n := offset; n <= bytesToCopy+offset; n *= 2 {
+ copy(expanded[n:], expanded[:n])
+ }
+ di += bytesToCopy
+ mLen -= bytesToCopy
+ }
+ di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
new file mode 100644
index 0000000000..710ea42812
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
@@ -0,0 +1,19 @@
+package lz4errors
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+ ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short"
+ ErrInvalidFrame Error = "lz4: bad magic number"
+ ErrInternalUnhandledState Error = "lz4: unhandled state"
+ ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum"
+ ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"
+ ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum"
+ ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
+ ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object"
+ ErrOptionInvalidBlockSize Error = "lz4: invalid block size"
+ ErrOptionNotApplicable Error = "lz4: option not applicable"
+ ErrWriterNotClosed Error = "lz4: writer not closed"
+)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
new file mode 100644
index 0000000000..279a8cc493
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
@@ -0,0 +1,331 @@
+package lz4stream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
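+// Blocks manages the data blocks of a frame: Block is used when processing
+// sequentially, while the Blocks channel coordinates the goroutines used in
+// concurrent mode.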
+type Blocks struct {
+ Block *FrameDataBlock
+ Blocks chan chan *FrameDataBlock
+ mu sync.Mutex
+ err error
+}
+
+func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
+ if num == 1 {
+ b.Blocks = nil
+ b.Block = NewFrameDataBlock(f)
+ return
+ }
+ b.Block = nil
+ if cap(b.Blocks) != num {
+ b.Blocks = make(chan chan *FrameDataBlock, num)
+ }
+ // goroutine managing concurrent block compression goroutines.
+ go func() {
+ // Process next block compression item.
+ for c := range b.Blocks {
+ // Read the next compressed block result.
+ // Waiting here ensures that the blocks are output in the order they were sent.
+ // The incoming channel is always closed as it indicates to the caller that
+ // the block has been processed.
+ block := <-c
+ if block == nil {
+ // Notify the block compression routine that we are done with its result.
+ // This is used when a sentinel block is sent to terminate the compression.
+ close(c)
+ return
+ }
+ // Do not attempt to write the block upon any previous failure.
+ if b.err == nil {
+ // Write the block.
+ if err := block.Write(f, dst); err != nil {
+ // Keep the first error.
+ b.err = err
+ // All pending compression goroutines need to shut down, so we need to keep going.
+ }
+ }
+ close(c)
+ }
+ }()
+}
+
+func (b *Blocks) close(f *Frame, num int) error {
+ if num == 1 {
+ if b.Block != nil {
+ b.Block.Close(f)
+ }
+ err := b.err
+ b.err = nil
+ return err
+ }
+ if b.Blocks == nil {
+ // Not initialized yet.
+ return nil
+ }
+ c := make(chan *FrameDataBlock)
+ b.Blocks <- c
+ c <- nil
+ <-c
+ err := b.err
+ b.err = nil
+ return err
+}
+
+// ErrorR returns any error set while uncompressing a stream.
+func (b *Blocks) ErrorR() error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.err
+}
+
+// initR returns a channel that streams the uncompressed blocks if in concurrent
+// mode and no error. When the channel is closed, check for any error with b.ErrorR.
+//
+// If not in concurrent mode, the uncompressed block is b.Block and the returned error
+// needs to be checked.
+func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
+ size := f.Descriptor.Flags.BlockSizeIndex()
+ if num == 1 {
+ b.Blocks = nil
+ b.Block = NewFrameDataBlock(f)
+ return nil, nil
+ }
+ b.Block = nil
+ blocks := make(chan chan []byte, num)
+ // data receives the uncompressed blocks.
+ data := make(chan []byte)
+ // Read blocks from the source sequentially
+ // and uncompress them concurrently.
+
+ // In legacy mode, accumulate the uncompressed sizes in cum.
+ var cum uint32
+ go func() {
+ var cumx uint32
+ var err error
+ for b.ErrorR() == nil {
+ block := NewFrameDataBlock(f)
+ cumx, err = block.Read(f, src, 0)
+ if err != nil {
+ break
+ }
+ // Recheck for an error as reading may be slow and uncompressing is expensive.
+ if b.ErrorR() != nil {
+ break
+ }
+ c := make(chan []byte)
+ blocks <- c
+ go func() {
+ data, err := block.Uncompress(f, size.Get(), false)
+ if err != nil {
+ b.closeR(err)
+ } else {
+ c <- data
+ }
+ }()
+ }
+ // End the collection loop and the data channel.
+ c := make(chan []byte)
+ blocks <- c
+ c <- nil // signal the collection loop that we are done
+ <-c // wait for the collect loop to complete
+ if f.isLegacy() && cum == cumx {
+ err = io.EOF
+ }
+ b.closeR(err)
+ close(data)
+ }()
+ // Collect the uncompressed blocks and make them available
+ // on the returned channel.
+ go func(leg bool) {
+ defer close(blocks)
+ for c := range blocks {
+ buf := <-c
+ if buf == nil {
+ // Signal to end the loop.
+ close(c)
+ return
+ }
+ // Perform checksum now as the blocks are received in order.
+ if f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(buf)
+ }
+ if leg {
+ cum += uint32(len(buf))
+ }
+ data <- buf
+ close(c)
+ }
+ }(f.isLegacy())
+ return data, nil
+}
+
+// closeR safely sets the error on b if not already set.
+func (b *Blocks) closeR(err error) {
+ b.mu.Lock()
+ if b.err == nil {
+ b.err = err
+ }
+ b.mu.Unlock()
+}
+
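+// NewFrameDataBlock returns a FrameDataBlock whose buffer comes from the pool
+// matching the frame descriptor's block size.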
+func NewFrameDataBlock(f *Frame) *FrameDataBlock {
+ buf := f.Descriptor.Flags.BlockSizeIndex().Get()
+ return &FrameDataBlock{Data: buf, data: buf}
+}
+
+type FrameDataBlock struct {
+ Size DataBlockSize
+ Data []byte // compressed or uncompressed data (.data or .src)
+ Checksum uint32
+ data []byte // buffer for compressed data
+ src []byte // uncompressed data
+ err error // used in concurrent mode
+}
+
+func (b *FrameDataBlock) Close(f *Frame) {
+ b.Size = 0
+ b.Checksum = 0
+ b.err = nil
+ if b.data != nil {
+ // Block was not already closed.
+ lz4block.Put(b.data)
+ b.Data = nil
+ b.data = nil
+ b.src = nil
+ }
+}
+
+// Block compression errors are ignored since the buffer is sized appropriately.
+func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
+ data := b.data
+ if f.isLegacy() {
+ data = data[:cap(data)]
+ } else {
+ data = data[:len(src)] // trigger the incompressible flag in CompressBlock
+ }
+ var n int
+ switch level {
+ case lz4block.Fast:
+ n, _ = lz4block.CompressBlock(src, data)
+ default:
+ n, _ = lz4block.CompressBlockHC(src, data, level)
+ }
+ if n == 0 {
+ b.Size.UncompressedSet(true)
+ b.Data = src
+ } else {
+ b.Size.UncompressedSet(false)
+ b.Data = data[:n]
+ }
+ b.Size.sizeSet(len(b.Data))
+ b.src = src // keep track of the source for content checksum
+
+ if f.Descriptor.Flags.BlockChecksum() {
+ b.Checksum = xxh32.ChecksumZero(src)
+ }
+ return b
+}
+
+func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
+ // Write is called in the same order as blocks are compressed,
+ // so content checksum must be done here.
+ if f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(b.src)
+ }
+ buf := f.buf[:]
+ binary.LittleEndian.PutUint32(buf, uint32(b.Size))
+ if _, err := dst.Write(buf[:4]); err != nil {
+ return err
+ }
+
+ if _, err := dst.Write(b.Data); err != nil {
+ return err
+ }
+
+ if b.Checksum == 0 {
+ return nil
+ }
+ binary.LittleEndian.PutUint32(buf, b.Checksum)
+ _, err := dst.Write(buf[:4])
+ return err
+}
+
+// Read updates b with the next block data, size and checksum if available.
+func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
+ x, err := f.readUint32(src)
+ if err != nil {
+ return 0, err
+ }
+ if f.isLegacy() {
+ switch x {
+ case frameMagicLegacy:
+ // Concatenated legacy frame.
+ return b.Read(f, src, cum)
+ case cum:
+ // Only works in non-concurrent mode; concurrent mode is handled separately.
+ // Linux kernel format appends the total uncompressed size at the end.
+ return 0, io.EOF
+ }
+ } else if x == 0 {
+ // Marker for end of stream.
+ return 0, io.EOF
+ }
+ b.Size = DataBlockSize(x)
+
+ size := b.Size.size()
+ if size > cap(b.data) {
+ return x, lz4errors.ErrOptionInvalidBlockSize
+ }
+ b.data = b.data[:size]
+ if _, err := io.ReadFull(src, b.data); err != nil {
+ return x, err
+ }
+ if f.Descriptor.Flags.BlockChecksum() {
+ sum, err := f.readUint32(src)
+ if err != nil {
+ return 0, err
+ }
+ b.Checksum = sum
+ }
+ return x, nil
+}
+
+func (b *FrameDataBlock) Uncompress(f *Frame, dst []byte, sum bool) ([]byte, error) {
+ if b.Size.Uncompressed() {
+ n := copy(dst, b.data)
+ dst = dst[:n]
+ } else {
+ n, err := lz4block.UncompressBlock(b.data, dst)
+ if err != nil {
+ return nil, err
+ }
+ dst = dst[:n]
+ }
+ if f.Descriptor.Flags.BlockChecksum() {
+ if c := xxh32.ChecksumZero(dst); c != b.Checksum {
+ err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
+ return nil, err
+ }
+ }
+ if sum && f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(dst)
+ }
+ return dst, nil
+}
+
+func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
+ if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
+ return
+ }
+ x = binary.LittleEndian.Uint32(f.buf[:4])
+ return
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
new file mode 100644
index 0000000000..cfbd5674d9
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
@@ -0,0 +1,200 @@
+// Package lz4stream provides the types that support reading and writing LZ4 data streams.
+package lz4stream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+//go:generate go run gen.go
+
+const (
+ frameMagic uint32 = 0x184D2204
+ frameSkipMagic uint32 = 0x184D2A50
+ frameMagicLegacy uint32 = 0x184C2102
+)
+
+func NewFrame() *Frame {
+ return &Frame{}
+}
+
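+// Frame represents an LZ4 frame: magic number, descriptor, data blocks and
+// optional content checksum.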
+type Frame struct {
+ buf [15]byte // room for the 4-byte magic number plus the frame descriptor, which needs at most 2+8+1=11 bytes
+ Magic uint32
+ Descriptor FrameDescriptor
+ Blocks Blocks
+ Checksum uint32
+ checksum xxh32.XXHZero
+}
+
+// Reset allows reusing the Frame.
+// The Descriptor configuration is not modified.
+func (f *Frame) Reset(num int) {
+ f.Magic = 0
+ f.Descriptor.Checksum = 0
+ f.Descriptor.ContentSize = 0
+ _ = f.Blocks.close(f, num)
+ f.Checksum = 0
+}
+
+func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
+ if legacy {
+ f.Magic = frameMagicLegacy
+ idx := lz4block.Index(lz4block.Block8Mb)
+ f.Descriptor.Flags.BlockSizeIndexSet(idx)
+ } else {
+ f.Magic = frameMagic
+ f.Descriptor.initW()
+ }
+ f.Blocks.initW(f, dst, num)
+ f.checksum.Reset()
+}
+
+func (f *Frame) CloseW(dst io.Writer, num int) error {
+ if err := f.Blocks.close(f, num); err != nil {
+ return err
+ }
+ if f.isLegacy() {
+ return nil
+ }
+ buf := f.buf[:0]
+ // End mark (data block size of uint32(0)).
+ buf = append(buf, 0, 0, 0, 0)
+ if f.Descriptor.Flags.ContentChecksum() {
+ buf = f.checksum.Sum(buf)
+ }
+ _, err := dst.Write(buf)
+ return err
+}
+
+func (f *Frame) isLegacy() bool {
+ return f.Magic == frameMagicLegacy
+}
+
+func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
+ if f.Magic > 0 {
+ // Header already read.
+ return nil, nil
+ }
+
+newFrame:
+ var err error
+ if f.Magic, err = f.readUint32(src); err != nil {
+ return nil, err
+ }
+ switch m := f.Magic; {
+ case m == frameMagic || m == frameMagicLegacy:
+ // All 16 values of frameSkipMagic are valid.
+ case m>>8 == frameSkipMagic>>8:
+ skip, err := f.readUint32(src)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
+ return nil, err
+ }
+ goto newFrame
+ default:
+ return nil, lz4errors.ErrInvalidFrame
+ }
+ if err := f.Descriptor.initR(f, src); err != nil {
+ return nil, err
+ }
+ f.checksum.Reset()
+ return f.Blocks.initR(f, num, src)
+}
+
+func (f *Frame) CloseR(src io.Reader) (err error) {
+ if f.isLegacy() {
+ return nil
+ }
+ if !f.Descriptor.Flags.ContentChecksum() {
+ return nil
+ }
+ if f.Checksum, err = f.readUint32(src); err != nil {
+ return err
+ }
+ if c := f.checksum.Sum32(); c != f.Checksum {
+ return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
+ }
+ return nil
+}
+
+type FrameDescriptor struct {
+ Flags DescriptorFlags
+ ContentSize uint64
+ Checksum uint8
+}
+
+func (fd *FrameDescriptor) initW() {
+ fd.Flags.VersionSet(1)
+ fd.Flags.BlockIndependenceSet(true)
+}
+
+func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
+ if fd.Checksum > 0 {
+ // Header already written.
+ return nil
+ }
+
+ buf := f.buf[:4]
+ // Write the magic number here even though it belongs to the Frame.
+ binary.LittleEndian.PutUint32(buf, f.Magic)
+ if !f.isLegacy() {
+ buf = buf[:4+2]
+ binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
+
+ if fd.Flags.Size() {
+ buf = buf[:4+2+8]
+ binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
+ }
+ fd.Checksum = descriptorChecksum(buf[4:])
+ buf = append(buf, fd.Checksum)
+ }
+
+ _, err := dst.Write(buf)
+ return err
+}
+
+func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
+ if f.isLegacy() {
+ idx := lz4block.Index(lz4block.Block8Mb)
+ f.Descriptor.Flags.BlockSizeIndexSet(idx)
+ return nil
+ }
+ // Read the flags and the checksum, assuming there is no content size.
+ buf := f.buf[:3]
+ if _, err := io.ReadFull(src, buf); err != nil {
+ return err
+ }
+ descr := binary.LittleEndian.Uint16(buf)
+ fd.Flags = DescriptorFlags(descr)
+ if fd.Flags.Size() {
+ // Append the 8 missing bytes.
+ buf = buf[:3+8]
+ if _, err := io.ReadFull(src, buf[3:]); err != nil {
+ return err
+ }
+ fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
+ }
+ fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
+ buf = buf[:len(buf)-1] // all descriptor fields except checksum
+ if c := descriptorChecksum(buf); fd.Checksum != c {
+ return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
+ }
+ // Validate the elements that can be.
+ if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
+ return lz4errors.ErrOptionInvalidBlockSize
+ }
+ return nil
+}
+
+func descriptorChecksum(buf []byte) byte {
+ return byte(xxh32.ChecksumZero(buf) >> 8)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
new file mode 100644
index 0000000000..d33a6be95c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
@@ -0,0 +1,103 @@
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+package lz4stream
+
+import "github.com/pierrec/lz4/v4/internal/lz4block"
+
+// DescriptorFlags is defined as follows:
+// field bits
+// ----- ----
+// _ 2
+// ContentChecksum 1
+// Size 1
+// BlockChecksum 1
+// BlockIndependence 1
+// Version 2
+// _ 4
+// BlockSizeIndex 3
+// _ 1
+type DescriptorFlags uint16
+
+// Getters.
+func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 }
+func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 }
+func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 }
+func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
+func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) }
+func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
+ return lz4block.BlockSizeIndex(x >> 12 & 0x7)
+}
+
+// Setters.
+func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
+ const b = 1 << 2
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
+ const b = 1 << 3
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
+ const b = 1 << 4
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
+ const b = 1 << 5
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
+ *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
+ return x
+}
+func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
+ *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
+ return x
+}
+
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+// DataBlockSize is defined as follows:
+// field bits
+// ----- ----
+// size 31
+// Uncompressed 1
+type DataBlockSize uint32
+
+// Getters.
+func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) }
+func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
+
+// Setters.
+func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
+ *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
+ return x
+}
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
+ const b = 1 << 31
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
new file mode 100644
index 0000000000..8d3206a87c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,212 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32-bit version).
+// (https://github.com/Cyan4973/XXH/)
+package xxh32
+
+import (
+ "encoding/binary"
+)
+
+const (
+ prime1 uint32 = 2654435761
+ prime2 uint32 = 2246822519
+ prime3 uint32 = 3266489917
+ prime4 uint32 = 668265263
+ prime5 uint32 = 374761393
+
+ primeMask = 0xFFFFFFFF
+ prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+ prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+ v [4]uint32
+ totalLen uint64
+ buf [16]byte
+ bufused int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+ h32 := xxh.Sum32()
+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+ xxh.v[0] = prime1plus2
+ xxh.v[1] = prime2
+ xxh.v[2] = 0
+ xxh.v[3] = prime1minus
+ xxh.totalLen = 0
+ xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+ return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+ return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+ if xxh.totalLen == 0 {
+ xxh.Reset()
+ }
+ n := len(input)
+ m := xxh.bufused
+
+ xxh.totalLen += uint64(n)
+
+ r := len(xxh.buf) - m
+ if n < r {
+ copy(xxh.buf[m:], input)
+ xxh.bufused += len(input)
+ return n, nil
+ }
+
+ var buf *[16]byte
+ if m != 0 {
+ // some data left from previous update
+ buf = &xxh.buf
+ c := copy(buf[m:], input)
+ n -= c
+ input = input[c:]
+ }
+ update(&xxh.v, buf, input)
+ xxh.bufused = copy(xxh.buf[:], input[n-n%16:])
+
+ return n, nil
+}
+
+// Portable version of update. This updates v by processing all of buf
+// (if not nil) and all full 16-byte blocks of input.
+func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
+ // Causes compiler to work directly from registers instead of stack:
+ v1, v2, v3, v4 := v[0], v[1], v[2], v[3]
+
+ if buf != nil {
+ v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+ }
+
+ for ; len(input) >= 16; input = input[16:] {
+ sub := input[:16] //BCE hint for compiler
+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+ }
+ v[0], v[1], v[2], v[3] = v1, v2, v3, v4
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+ h32 := uint32(xxh.totalLen)
+ if h32 >= 16 {
+ h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
+ } else {
+ h32 += prime5
+ }
+
+ p := 0
+ n := xxh.bufused
+ buf := xxh.buf
+ for n := n - 4; p <= n; p += 4 {
+ h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+ h32 = rol17(h32) * prime4
+ }
+ for ; p < n; p++ {
+ h32 += uint32(buf[p]) * prime5
+ h32 = rol11(h32) * prime1
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime2
+ h32 ^= h32 >> 13
+ h32 *= prime3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
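+// A minimal usage sketch from a client package, assuming `r` is an io.Reader
+// supplying the data to hash (XXHZero implements io.Writer, so io.Copy works):
+//
+//	var h xxh32.XXHZero
+//	if _, err := io.Copy(&h, r); err != nil {
+//		// handle the read error; Write itself never fails
+//	}
+//	digest := h.Sum32()
+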
+// Portable version of ChecksumZero.
+func checksumZeroGo(input []byte) uint32 {
+ n := len(input)
+ h32 := uint32(n)
+
+ if n < 16 {
+ h32 += prime5
+ } else {
+ v1 := prime1plus2
+ v2 := prime2
+ v3 := uint32(0)
+ v4 := prime1minus
+ p := 0
+ for n := n - 16; p <= n; p += 16 {
+ sub := input[p:][:16] //BCE hint for compiler
+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+ }
+ input = input[p:]
+ n -= p
+ h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ }
+
+ p := 0
+ for n := n - 4; p <= n; p += 4 {
+ h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+ h32 = rol17(h32) * prime4
+ }
+ for p < n {
+ h32 += uint32(input[p]) * prime5
+ h32 = rol11(h32) * prime1
+ p++
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime2
+ h32 ^= h32 >> 13
+ h32 *= prime3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+func rol1(u uint32) uint32 {
+ return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+ return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+ return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+ return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+ return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+ return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+ return u<<18 | u>>14
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
new file mode 100644
index 0000000000..0978b2665b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
@@ -0,0 +1,11 @@
+// +build !noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+//
+//go:noescape
+func ChecksumZero(input []byte) uint32
+
+//go:noescape
+func update(v *[4]uint32, buf *[16]byte, input []byte)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
new file mode 100644
index 0000000000..0e9f146a36
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
@@ -0,0 +1,259 @@
+// +build !noasm
+
+#include "textflag.h"
+
+#define prime1 $2654435761
+#define prime2 $2246822519
+#define prime3 $3266489917
+#define prime4 $668265263
+#define prime5 $374761393
+
+#define prime1plus2 $606290984
+#define prime1minus $1640531535
+
+// Register allocation.
+#define p R0
+#define n R1
+#define h R2
+#define v1 R2 // Alias for h.
+#define v2 R3
+#define v3 R4
+#define v4 R5
+#define x1 R6
+#define x2 R7
+#define x3 R8
+#define x4 R9
+
+// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
+#define prime1r R11
+#define prime2r R12
+#define prime3r R3 // The rest can alias v{2-4}.
+#define prime4r R4
+#define prime5r R5
+
+// Update round macros. These read from and increment p.
+
+#define round16aligned \
+ MOVM.IA.W (p), [x1, x2, x3, x4] \
+ \
+ MULA x1, prime2r, v1, v1 \
+ MULA x2, prime2r, v2, v2 \
+ MULA x3, prime2r, v3, v3 \
+ MULA x4, prime2r, v4, v4 \
+ \
+ MOVW v1 @> 19, v1 \
+ MOVW v2 @> 19, v2 \
+ MOVW v3 @> 19, v3 \
+ MOVW v4 @> 19, v4 \
+ \
+ MUL prime1r, v1 \
+ MUL prime1r, v2 \
+ MUL prime1r, v3 \
+ MUL prime1r, v4 \
+
+#define round16unaligned \
+ MOVBU.P 16(p), x1 \
+ MOVBU -15(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -14(p), x3 \
+ MOVBU -13(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v1, v1 \
+ MOVW v1 @> 19, v1 \
+ MUL prime1r, v1 \
+ \
+ MOVBU -12(p), x1 \
+ MOVBU -11(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -10(p), x3 \
+ MOVBU -9(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v2, v2 \
+ MOVW v2 @> 19, v2 \
+ MUL prime1r, v2 \
+ \
+ MOVBU -8(p), x1 \
+ MOVBU -7(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -6(p), x3 \
+ MOVBU -5(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v3, v3 \
+ MOVW v3 @> 19, v3 \
+ MUL prime1r, v3 \
+ \
+ MOVBU -4(p), x1 \
+ MOVBU -3(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -2(p), x3 \
+ MOVBU -1(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v4, v4 \
+ MOVW v4 @> 19, v4 \
+ MUL prime1r, v4 \
+
+
+// func ChecksumZero([]byte) uint32
+TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
+ MOVW input_base+0(FP), p
+ MOVW input_len+4(FP), n
+
+ MOVW prime1, prime1r
+ MOVW prime2, prime2r
+
+ // Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
+ // here, but that's a pseudo-op that generates a load through R11.
+ MOVW prime5, prime5r
+ ADD prime5r, n, h
+ CMP $0, n
+ BEQ end
+
+ // We let n go negative so we can do comparisons with SUB.S
+ // instead of separate CMP.
+ SUB.S $16, n
+ BMI loop16done
+
+ MOVW prime1plus2, v1
+ MOVW prime2, v2
+ MOVW $0, v3
+ MOVW prime1minus, v4
+
+ TST $3, p
+ BNE loop16unaligned
+
+loop16aligned:
+ SUB.S $16, n
+ round16aligned
+ BPL loop16aligned
+ B loop16finish
+
+loop16unaligned:
+ SUB.S $16, n
+ round16unaligned
+ BPL loop16unaligned
+
+loop16finish:
+ MOVW v1 @> 31, h
+ ADD v2 @> 25, h
+ ADD v3 @> 20, h
+ ADD v4 @> 14, h
+
+ // h += len(input) with v2 as temporary.
+ MOVW input_len+4(FP), v2
+ ADD v2, h
+
+loop16done:
+ ADD $16, n // Restore number of bytes left.
+
+ SUB.S $4, n
+ MOVW prime3, prime3r
+ BMI loop4done
+ MOVW prime4, prime4r
+
+ TST $3, p
+ BNE loop4unaligned
+
+loop4aligned:
+ SUB.S $4, n
+
+ MOVW.P 4(p), x1
+ MULA prime3r, x1, h, h
+ MOVW h @> 15, h
+ MUL prime4r, h
+
+ BPL loop4aligned
+ B loop4done
+
+loop4unaligned:
+ SUB.S $4, n
+
+ MOVBU.P 4(p), x1
+ MOVBU -3(p), x2
+ ORR x2 << 8, x1
+ MOVBU -2(p), x3
+ ORR x3 << 16, x1
+ MOVBU -1(p), x4
+ ORR x4 << 24, x1
+
+ MULA prime3r, x1, h, h
+ MOVW h @> 15, h
+ MUL prime4r, h
+
+ BPL loop4unaligned
+
+loop4done:
+ ADD.S $4, n // Restore number of bytes left.
+ BEQ end
+
+ MOVW prime5, prime5r
+
+loop1:
+ SUB.S $1, n
+
+ MOVBU.P 1(p), x1
+ MULA prime5r, x1, h, h
+ MOVW h @> 21, h
+ MUL prime1r, h
+
+ BNE loop1
+
+end:
+ MOVW prime3, prime3r
+ EOR h >> 15, h
+ MUL prime2r, h
+ EOR h >> 13, h
+ MUL prime3r, h
+ EOR h >> 16, h
+
+ MOVW h, ret+12(FP)
+ RET
+
+
+// func update(v *[4]uint32, buf *[16]byte, p []byte)
+TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
+ MOVW v+0(FP), p
+ MOVM.IA (p), [v1, v2, v3, v4]
+
+ MOVW prime1, prime1r
+ MOVW prime2, prime2r
+
+ // Process buf, if not nil.
+ MOVW buf+4(FP), p
+ CMP $0, p
+ BEQ noBuffered
+
+ round16aligned
+
+noBuffered:
+ MOVW input_base +8(FP), p
+ MOVW input_len +12(FP), n
+
+ SUB.S $16, n
+ BMI end
+
+ TST $3, p
+ BNE loop16unaligned
+
+loop16aligned:
+ SUB.S $16, n
+ round16aligned
+ BPL loop16aligned
+ B end
+
+loop16unaligned:
+ SUB.S $16, n
+ round16unaligned
+ BPL loop16unaligned
+
+end:
+ MOVW v+0(FP), p
+ MOVM.IA [v1, v2, v3, v4], (p)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
new file mode 100644
index 0000000000..c96b59b8c3
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
@@ -0,0 +1,10 @@
+// +build !arm noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }
+
+func update(v *[4]uint32, buf *[16]byte, input []byte) {
+ updateGo(v, buf, input)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go
new file mode 100644
index 0000000000..c585d4064f
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/lz4.go
@@ -0,0 +1,147 @@
+// Package lz4 implements reading and writing lz4 compressed data.
+//
+// The package supports both the LZ4 stream format,
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// and the LZ4 block format, defined at
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
+//
+// See https://github.com/lz4/lz4 for the reference C implementation.
+package lz4
+
+import (
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+func _() {
+ // Safety checks for duplicated elements.
+ var x [1]struct{}
+ _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
+ _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
+ _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
+ _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
+ _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
+}
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n, i.e. the worst case when the data is not compressible.
+func CompressBlockBound(n int) int {
+ return lz4block.CompressBlockBound(n)
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+ return lz4block.UncompressBlock(src, dst)
+}
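+
+// A minimal usage sketch, assuming `compressed` holds one LZ4 block and
+// `uncompressedSize` is known to the caller (the block format does not store
+// the original length, so it must be carried alongside the block):
+//
+//	dst := make([]byte, uncompressedSize)
+//	n, err := lz4.UncompressBlock(compressed, dst)
+//	if err != nil {
+//		// corrupted block or dst too small
+//	}
+//	data := dst[:n]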
+
+// A Compressor compresses data into the LZ4 block format.
+// It uses a fast compression algorithm.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type Compressor struct{ c lz4block.Compressor }
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+ return c.c.CompressBlock(src, dst)
+}
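+
+// A minimal usage sketch with a bound-sized destination, assuming `src` is the
+// []byte to compress:
+//
+//	var c lz4.Compressor
+//	dst := make([]byte, lz4.CompressBlockBound(len(src)))
+//	n, err := c.CompressBlock(src, dst)
+//	if err != nil {
+//		// cannot happen when dst is bound-sized
+//	}
+//	compressed := dst[:n] // n > 0 when dst is bound-sized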
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+// It is equivalent to Compressor.CompressBlock.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+//
+// The final argument is ignored and should be set to nil; it was previously
+// scratch space for a hash table, which the compressor now manages itself.
+//
+// This function is deprecated. Use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+ return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A CompressorHC is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+ // Level is the maximum search depth for compression.
+ // Values <= 0 mean no maximum.
+ Level CompressionLevel
+ c lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
+ return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
+}
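+
+// A minimal usage sketch, same pattern as Compressor but with an explicit
+// level; `src` is an assumed []byte:
+//
+//	c := lz4.CompressorHC{Level: lz4.Level9}
+//	dst := make([]byte, lz4.CompressBlockBound(len(src)))
+//	n, err := c.CompressBlock(src, dst)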
+
+// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
+// The final two arguments are ignored and should be set to nil.
+//
+// This function is deprecated. Use a CompressorHC instead.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
+ return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
+}
+
+const (
+ // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+ // block is corrupted or the destination buffer is not large enough for the uncompressed data.
+ ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
+ // ErrInvalidFrame is returned when reading an invalid LZ4 archive.
+ ErrInvalidFrame = lz4errors.ErrInvalidFrame
+ // ErrInternalUnhandledState is an internal error.
+ ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
+ // ErrInvalidHeaderChecksum is returned when reading a frame.
+ ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
+ // ErrInvalidBlockChecksum is returned when reading a frame.
+ ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
+ // ErrInvalidFrameChecksum is returned when reading a frame.
+ ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
+ // ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+ ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
+ // ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+ ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
+ // ErrOptionInvalidBlockSize is returned when the supplied block size is invalid.
+ ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
+ // ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+ ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
+ // ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+ ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
+)
diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go
new file mode 100644
index 0000000000..4e1b6703b5
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options.go
@@ -0,0 +1,213 @@
+package lz4
+
+import (
+ "fmt"
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "reflect"
+ "runtime"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
+
+type (
+ applier interface {
+ Apply(...Option) error
+ private()
+ }
+ // Option defines the parameters to setup an LZ4 Writer or Reader.
+ Option func(applier) error
+)
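+
+// A minimal usage sketch: options are applied through the Apply method of a
+// Writer or Reader before the first Write or Read; `out` is an assumed io.Writer:
+//
+//	zw := lz4.NewWriter(out)
+//	err := zw.Apply(
+//		lz4.BlockSizeOption(lz4.Block256Kb),
+//		lz4.CompressionLevelOption(lz4.Level5),
+//		lz4.ConcurrencyOption(-1), // n <= 0 selects runtime.GOMAXPROCS(0)
+//	)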
+
+// String returns a string representation of the option with its parameter(s).
+func (o Option) String() string {
+ return o(nil).Error()
+}
+
+// Default options.
+var (
+ DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
+ DefaultChecksumOption = ChecksumOption(true)
+ DefaultConcurrency = ConcurrencyOption(1)
+ defaultOnBlockDone = OnBlockDoneOption(nil)
+)
+
+const (
+ Block64Kb BlockSize = 1 << (16 + iota*2)
+ Block256Kb
+ Block1Mb
+ Block4Mb
+)
+
+// BlockSize defines the size of the blocks to be compressed.
+type BlockSize uint32
+
+// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
+func BlockSizeOption(size BlockSize) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("BlockSizeOption(%s)", size)
+ return lz4errors.Error(s)
+ case *Writer:
+ size := uint32(size)
+ if !lz4block.IsValid(size) {
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+ }
+ w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// BlockChecksumOption enables or disables block checksum (default=false).
+func BlockChecksumOption(flag bool) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// ChecksumOption enables/disables the frame content checksum (default=true).
+func ChecksumOption(flag bool) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("ChecksumOption(%v)", flag)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// SizeOption sets the size of the original uncompressed data (default=0). It is useful for letting
+// readers know the size of the whole uncompressed data stream up front.
+func SizeOption(size uint64) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("SizeOption(%d)", size)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.SizeSet(size > 0)
+ w.frame.Descriptor.ContentSize = size
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// ConcurrencyOption sets the number of goroutines used for compression.
+// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
+func ConcurrencyOption(n int) Option {
+ if n <= 0 {
+ n = runtime.GOMAXPROCS(0)
+ }
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("ConcurrencyOption(%d)", n)
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.num = n
+ return nil
+ case *Reader:
+ rw.num = n
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// CompressionLevel defines the level of compression to use. The higher the level, the better but slower the compression.
+type CompressionLevel uint32
+
+const (
+ Fast CompressionLevel = 0
+ Level1 CompressionLevel = 1 << (8 + iota)
+ Level2
+ Level3
+ Level4
+ Level5
+ Level6
+ Level7
+ Level8
+ Level9
+)
+
+// CompressionLevelOption defines the compression level (default=Fast).
+func CompressionLevelOption(level CompressionLevel) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("CompressionLevelOption(%s)", level)
+ return lz4errors.Error(s)
+ case *Writer:
+ switch level {
+ case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+ default:
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+ }
+ w.level = lz4block.CompressionLevel(level)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+func onBlockDone(int) {}
+
+// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when it has been compressed;
+// for a Reader, it is when it has been uncompressed.
+func OnBlockDoneOption(handler func(size int)) Option {
+ if handler == nil {
+ handler = onBlockDone
+ }
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.handler = handler
+ return nil
+ case *Reader:
+ rw.handler = handler
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// LegacyOption provides support for writing LZ4 frames in the legacy format.
+//
+// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
+//
+// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
+// the compressed stream is followed by the original (uncompressed) size of
+// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
+// This is also supported as a special case.
+func LegacyOption(legacy bool) Option {
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("LegacyOption(%v)", legacy)
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.legacy = legacy
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go
new file mode 100644
index 0000000000..2de814909e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Block64Kb-65536]
+ _ = x[Block256Kb-262144]
+ _ = x[Block1Mb-1048576]
+ _ = x[Block4Mb-4194304]
+}
+
+const (
+ _BlockSize_name_0 = "Block64Kb"
+ _BlockSize_name_1 = "Block256Kb"
+ _BlockSize_name_2 = "Block1Mb"
+ _BlockSize_name_3 = "Block4Mb"
+)
+
+func (i BlockSize) String() string {
+ switch {
+ case i == 65536:
+ return _BlockSize_name_0
+ case i == 262144:
+ return _BlockSize_name_1
+ case i == 1048576:
+ return _BlockSize_name_2
+ case i == 4194304:
+ return _BlockSize_name_3
+ default:
+ return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Fast-0]
+ _ = x[Level1-512]
+ _ = x[Level2-1024]
+ _ = x[Level3-2048]
+ _ = x[Level4-4096]
+ _ = x[Level5-8192]
+ _ = x[Level6-16384]
+ _ = x[Level7-32768]
+ _ = x[Level8-65536]
+ _ = x[Level9-131072]
+}
+
+const (
+ _CompressionLevel_name_0 = "Fast"
+ _CompressionLevel_name_1 = "Level1"
+ _CompressionLevel_name_2 = "Level2"
+ _CompressionLevel_name_3 = "Level3"
+ _CompressionLevel_name_4 = "Level4"
+ _CompressionLevel_name_5 = "Level5"
+ _CompressionLevel_name_6 = "Level6"
+ _CompressionLevel_name_7 = "Level7"
+ _CompressionLevel_name_8 = "Level8"
+ _CompressionLevel_name_9 = "Level9"
+)
+
+func (i CompressionLevel) String() string {
+ switch {
+ case i == 0:
+ return _CompressionLevel_name_0
+ case i == 512:
+ return _CompressionLevel_name_1
+ case i == 1024:
+ return _CompressionLevel_name_2
+ case i == 2048:
+ return _CompressionLevel_name_3
+ case i == 4096:
+ return _CompressionLevel_name_4
+ case i == 8192:
+ return _CompressionLevel_name_5
+ case i == 16384:
+ return _CompressionLevel_name_6
+ case i == 32768:
+ return _CompressionLevel_name_7
+ case i == 65536:
+ return _CompressionLevel_name_8
+ case i == 131072:
+ return _CompressionLevel_name_9
+ default:
+ return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go
new file mode 100644
index 0000000000..403aaf697a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/reader.go
@@ -0,0 +1,243 @@
+package lz4
+
+import (
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
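+// readerStates maps each Reader state to its successor; _State.next advances
+// along this table (for instance newState -> readState when the first Read
+// initializes the frame).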
+var readerStates = []aState{
+ noState: newState,
+ errorState: newState,
+ newState: readState,
+ readState: closedState,
+ closedState: newState,
+}
+
+// NewReader returns a new LZ4 frame decoder.
+func NewReader(r io.Reader) *Reader {
+ return newReader(r, false)
+}
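+
+// A minimal decompression sketch, assuming `in` (io.Reader) carries an LZ4
+// frame and `out` is an io.Writer:
+//
+//	zr := lz4.NewReader(in)
+//	if _, err := io.Copy(out, zr); err != nil {
+//		// handle error
+//	}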
+
+func newReader(r io.Reader, legacy bool) *Reader {
+ zr := &Reader{frame: lz4stream.NewFrame()}
+ zr.state.init(readerStates)
+ _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
+ zr.Reset(r)
+ return zr
+}
+
+// Reader allows reading an LZ4 stream.
+type Reader struct {
+ state _State
+ src io.Reader // source reader
+ num int // concurrency level
+ frame *lz4stream.Frame // frame being read
+ data []byte // block buffer allocated in non concurrent mode
+ reads chan []byte // pending data
+ idx int // size of pending data
+ handler func(int)
+ cum uint32
+}
+
+func (*Reader) private() {}
+
+func (r *Reader) Apply(options ...Option) (err error) {
+ defer r.state.check(&err)
+ switch r.state.state {
+ case newState:
+ case errorState:
+ return r.state.err
+ default:
+ return lz4errors.ErrOptionClosedOrError
+ }
+ for _, o := range options {
+ if err = o(r); err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Size returns the size of the underlying uncompressed data, if set in the stream.
+func (r *Reader) Size() int {
+ switch r.state.state {
+ case readState, closedState:
+ if r.frame.Descriptor.Flags.Size() {
+ return int(r.frame.Descriptor.ContentSize)
+ }
+ }
+ return 0
+}
+
+func (r *Reader) isNotConcurrent() bool {
+ return r.num == 1
+}
+
+func (r *Reader) init() error {
+ data, err := r.frame.InitR(r.src, r.num)
+ if err != nil {
+ return err
+ }
+ r.reads = data
+ r.idx = 0
+ size := r.frame.Descriptor.Flags.BlockSizeIndex()
+ r.data = size.Get()
+ r.cum = 0
+ return nil
+}
+
+func (r *Reader) Read(buf []byte) (n int, err error) {
+ defer r.state.check(&err)
+ switch r.state.state {
+ case readState:
+ case closedState, errorState:
+ return 0, r.state.err
+ case newState:
+ // First initialization.
+ if err = r.init(); r.state.next(err) {
+ return
+ }
+ default:
+ return 0, r.state.fail()
+ }
+ for len(buf) > 0 {
+ var bn int
+ if r.idx == 0 {
+ if r.isNotConcurrent() {
+ bn, err = r.read(buf)
+ } else {
+ lz4block.Put(r.data)
+ r.data = <-r.reads
+ if len(r.data) == 0 {
+ // No uncompressed data: something went wrong or we are done.
+ err = r.frame.Blocks.ErrorR()
+ }
+ }
+ switch err {
+ case nil:
+ case io.EOF:
+ if er := r.frame.CloseR(r.src); er != nil {
+ err = er
+ }
+ lz4block.Put(r.data)
+ r.data = nil
+ return
+ default:
+ return
+ }
+ }
+ if bn == 0 {
+ // Fill buf with buffered data.
+ bn = copy(buf, r.data[r.idx:])
+ r.idx += bn
+ if r.idx == len(r.data) {
+ // All data read, get ready for the next Read.
+ r.idx = 0
+ }
+ }
+ buf = buf[bn:]
+ n += bn
+ r.handler(bn)
+ }
+ return
+}
+
+// read uncompresses the next block as follows:
+// - if buf has enough room, the block is uncompressed into it directly
+//   and the length of the used space is returned
+// - else, the uncompressed data is stored in r.data and 0 is returned
+func (r *Reader) read(buf []byte) (int, error) {
+ block := r.frame.Blocks.Block
+ _, err := block.Read(r.frame, r.src, r.cum)
+ if err != nil {
+ return 0, err
+ }
+ var direct bool
+ dst := r.data[:cap(r.data)]
+ if len(buf) >= len(dst) {
+ // Uncompress directly into buf.
+ direct = true
+ dst = buf
+ }
+ dst, err = block.Uncompress(r.frame, dst, true)
+ if err != nil {
+ return 0, err
+ }
+ r.cum += uint32(len(dst))
+ if direct {
+ return len(dst), nil
+ }
+ r.data = dst
+ return 0, nil
+}
+
+// Reset clears the state of the Reader r such that it is equivalent to its
+// initial state from NewReader, but instead reading from reader.
+// No access to reader is performed.
+func (r *Reader) Reset(reader io.Reader) {
+ if r.data != nil {
+ lz4block.Put(r.data)
+ r.data = nil
+ }
+ r.frame.Reset(r.num)
+ r.state.reset()
+ r.src = reader
+ r.reads = nil
+}
+
+// WriteTo efficiently uncompresses the data from the Reader's underlying source and writes it to w.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ switch r.state.state {
+ case closedState, errorState:
+ return 0, r.state.err
+ case newState:
+ if err = r.init(); r.state.next(err) {
+ return
+ }
+ default:
+ return 0, r.state.fail()
+ }
+ defer r.state.nextd(&err)
+
+ var data []byte
+ if r.isNotConcurrent() {
+ size := r.frame.Descriptor.Flags.BlockSizeIndex()
+ data = size.Get()
+ defer lz4block.Put(data)
+ }
+ for {
+ var bn int
+ var dst []byte
+ if r.isNotConcurrent() {
+ bn, err = r.read(data)
+ dst = data[:bn]
+ } else {
+ lz4block.Put(dst)
+ dst = <-r.reads
+ bn = len(dst)
+ if bn == 0 {
+ // No uncompressed data: something went wrong or we are done.
+ err = r.frame.Blocks.ErrorR()
+ }
+ }
+ switch err {
+ case nil:
+ case io.EOF:
+ err = r.frame.CloseR(r.src)
+ return
+ default:
+ return
+ }
+ r.handler(bn)
+ bn, err = w.Write(dst)
+ n += int64(bn)
+ if err != nil {
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go
new file mode 100644
index 0000000000..d94f04d05e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state.go
@@ -0,0 +1,75 @@
+package lz4
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
+
+const (
+ noState aState = iota // uninitialized reader
+ errorState // unrecoverable error encountered
+ newState // instantiated object
+ readState // reading data
+ writeState // writing data
+ closedState // all done
+)
+
+type (
+ aState uint8
+ _State struct {
+ states []aState
+ state aState
+ err error
+ }
+)
+
+func (s *_State) init(states []aState) {
+ s.states = states
+ s.state = states[0]
+}
+
+func (s *_State) reset() {
+ s.state = s.states[0]
+ s.err = nil
+}
+
+// next sets the state to the next one unless it is passed a non-nil error.
+// It returns whether or not it is in error.
+func (s *_State) next(err error) bool {
+ if err != nil {
+ s.err = fmt.Errorf("%s: %w", s.state, err)
+ s.state = errorState
+ return true
+ }
+ s.state = s.states[s.state]
+ return false
+}
+
+// nextd is like next but for defers.
+func (s *_State) nextd(errp *error) bool {
+ return errp != nil && s.next(*errp)
+}
+
+// check records a non-nil error in s and, unless the error is io.EOF, moves s to the error state.
+// It does nothing if s is already in error.
+func (s *_State) check(errp *error) {
+ if s.state == errorState || errp == nil {
+ return
+ }
+ if err := *errp; err != nil {
+ s.err = fmt.Errorf("%w[%s]", err, s.state)
+ if !errors.Is(err, io.EOF) {
+ s.state = errorState
+ }
+ }
+}
+
+func (s *_State) fail() error {
+ s.state = errorState
+ s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
+ return s.err
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go
new file mode 100644
index 0000000000..75fb828924
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[noState-0]
+ _ = x[errorState-1]
+ _ = x[newState-2]
+ _ = x[readState-3]
+ _ = x[writeState-4]
+ _ = x[closedState-5]
+}
+
+const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"
+
+var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}
+
+func (i aState) String() string {
+ if i >= aState(len(_aState_index)-1) {
+ return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _aState_name[_aState_index[i]:_aState_index[i+1]]
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go
new file mode 100644
index 0000000000..44a43d251b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/writer.go
@@ -0,0 +1,233 @@
+package lz4
+
+import (
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
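+// writerStates maps each Writer state to its successor; _State.next advances
+// along this table (for instance newState -> writeState on the first Write and
+// writeState -> closedState on Close).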
+var writerStates = []aState{
+ noState: newState,
+ newState: writeState,
+ writeState: closedState,
+ closedState: newState,
+ errorState: newState,
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+func NewWriter(w io.Writer) *Writer {
+ zw := &Writer{frame: lz4stream.NewFrame()}
+ zw.state.init(writerStates)
+ _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
+ zw.Reset(w)
+ return zw
+}
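+
+// A minimal compression sketch, assuming `in` is an io.Reader and `out` an
+// io.Writer:
+//
+//	zw := lz4.NewWriter(out)
+//	if _, err := io.Copy(zw, in); err != nil {
+//		// handle error
+//	}
+//	if err := zw.Close(); err != nil { // flushes pending data and writes the end mark
+//		// handle error
+//	}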
+
+// Writer allows writing an LZ4 stream.
+type Writer struct {
+ state _State
+ src io.Writer // destination writer
+ level lz4block.CompressionLevel // how hard to try
+ num int // concurrency level
+ frame *lz4stream.Frame // frame being built
+ data []byte // pending data
+ idx int // size of pending data
+ handler func(int)
+ legacy bool
+}
+
+func (*Writer) private() {}
+
+func (w *Writer) Apply(options ...Option) (err error) {
+ defer w.state.check(&err)
+ switch w.state.state {
+ case newState:
+ case errorState:
+ return w.state.err
+ default:
+ return lz4errors.ErrOptionClosedOrError
+ }
+ for _, o := range options {
+ if err = o(w); err != nil {
+ return
+ }
+ }
+ w.Reset(w.src)
+ return
+}
+
+func (w *Writer) isNotConcurrent() bool {
+ return w.num == 1
+}
+
+// init sets up the Writer when in newState. It does not change the Writer state.
+func (w *Writer) init() error {
+ w.frame.InitW(w.src, w.num, w.legacy)
+ if true || !w.isNotConcurrent() {
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ w.data = size.Get()
+ }
+ w.idx = 0
+ return w.frame.Descriptor.Write(w.frame, w.src)
+}
+
+func (w *Writer) Write(buf []byte) (n int, err error) {
+ defer w.state.check(&err)
+ switch w.state.state {
+ case writeState:
+ case closedState, errorState:
+ return 0, w.state.err
+ case newState:
+ if err = w.init(); w.state.next(err) {
+ return
+ }
+ default:
+ return 0, w.state.fail()
+ }
+
+ zn := len(w.data)
+ for len(buf) > 0 {
+ if w.idx == 0 && len(buf) >= zn {
+ // Avoid a copy as there is enough data for a block.
+ if err = w.write(buf[:zn], false); err != nil {
+ return
+ }
+ n += zn
+ buf = buf[zn:]
+ continue
+ }
+ // Accumulate the data to be compressed.
+ m := copy(w.data[w.idx:], buf)
+ n += m
+ w.idx += m
+ buf = buf[m:]
+
+ if w.idx < len(w.data) {
+ // Buffer not filled.
+ return
+ }
+
+ // Buffer full.
+ if err = w.write(w.data, true); err != nil {
+ return
+ }
+ if !w.isNotConcurrent() {
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ w.data = size.Get()
+ }
+ w.idx = 0
+ }
+ return
+}
+
+func (w *Writer) write(data []byte, safe bool) error {
+ if w.isNotConcurrent() {
+ block := w.frame.Blocks.Block
+ err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
+ w.handler(len(block.Data))
+ return err
+ }
+ c := make(chan *lz4stream.FrameDataBlock)
+ w.frame.Blocks.Blocks <- c
+ go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
+ b := lz4stream.NewFrameDataBlock(w.frame)
+ c <- b.Compress(w.frame, data, w.level)
+ <-c
+ w.handler(len(b.Data))
+ b.Close(w.frame)
+ if safe {
+ // safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed
+ lz4block.Put(data)
+ }
+ }(c, data, safe)
+
+ return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer,
+// but does not close the underlying io.Writer.
+func (w *Writer) Close() (err error) {
+ switch w.state.state {
+ case writeState:
+ case errorState:
+ return w.state.err
+ default:
+ return nil
+ }
+ defer w.state.nextd(&err)
+ if w.idx > 0 {
+ // Flush pending data, disable w.data freeing as it is done later on.
+ if err = w.write(w.data[:w.idx], false); err != nil {
+ return err
+ }
+ w.idx = 0
+ }
+ err = w.frame.CloseW(w.src, w.num)
+ // It is now safe to free the buffer.
+ if w.data != nil {
+ lz4block.Put(w.data)
+ w.data = nil
+ }
+ return
+}
+
+// Reset clears the state of the Writer w such that it is equivalent to its
+// initial state from NewWriter, but instead writing to writer.
+// Reset keeps the previously applied options.
+// No access to writer is performed.
+//
+// w.Close must be called before Reset or pending data may be dropped.
+func (w *Writer) Reset(writer io.Writer) {
+ w.frame.Reset(w.num)
+ w.state.reset()
+ w.src = writer
+}
+
+// ReadFrom efficiently reads from r and compresses the data into the Writer's destination.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ switch w.state.state {
+ case closedState, errorState:
+ return 0, w.state.err
+ case newState:
+ if err = w.init(); w.state.next(err) {
+ return
+ }
+ default:
+ return 0, w.state.fail()
+ }
+ defer w.state.check(&err)
+
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ var done bool
+ var rn int
+ data := size.Get()
+ if w.isNotConcurrent() {
+ // Keep the same buffer for the whole process.
+ defer lz4block.Put(data)
+ }
+ for !done {
+ rn, err = io.ReadFull(r, data)
+ switch err {
+ case nil:
+ case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+ done = true
+ default:
+ return
+ }
+ n += int64(rn)
+ err = w.write(data[:rn], true)
+ if err != nil {
+ return
+ }
+ w.handler(rn)
+ if !done && !w.isNotConcurrent() {
+ // The buffer will be returned automatically by the goroutines (safe=true),
+ // so get a new one for the next round.
+ data = size.Get()
+ }
+ }
+ err = w.Close()
+ return
+}
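+
+// Note: because Writer implements io.ReaderFrom, an io.Copy(zw, in) call takes
+// this path, reading block-sized chunks directly instead of going through
+// io.Copy's own intermediate buffer.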