Mirror of https://github.com/go-gitea/gitea, synced 2025-11-03 21:08:25 +00:00
	Vendor Update Go Libs (#13444)
* denisenkom/go-mssqldb untagged -> v0.9.0
* github.com/editorconfig/editorconfig-core-go v2.3.7 -> v2.3.8
* github.com/go-testfixtures/testfixtures v3.4.0 -> v3.4.1
* github.com/mholt/archiver v3.3.2 -> v3.5.0
* github.com/olivere/elastic v7.0.20 -> v7.0.21
* github.com/urfave/cli v1.22.4 -> v1.22.5
* github.com/xanzy/go-gitlab v0.38.1 -> v0.39.0
* github.com/yuin/goldmark-meta untagged -> v1.0.0
* github.com/ethantkoenig/rupture 0a76f03a811a -> c3b3b810dc77
* github.com/jaytaylor/html2text 8fb95d837f7d -> 3577fbdbcff7
* github.com/kballard/go-shellquote cd60e84ee657 -> 95032a82bc51
* github.com/msteinert/pam 02ccfbfaf0cc -> 913b8f8cdf8b
* github.com/unknwon/paginater 7748a72e0141 -> 042474bd0eae
* CI.restart()

Co-authored-by: techknowlogick <techknowlogick@gitea.io>
vendor/github.com/pierrec/lz4/v3/debug.go | 23 (generated, vendored)
@@ -1,23 +0,0 @@
// +build lz4debug

package lz4

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

const debugFlag = true

func debug(args ...interface{}) {
	_, file, line, _ := runtime.Caller(1)
	file = filepath.Base(file)

	f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
	if f[len(f)-1] != '\n' {
		f += "\n"
	}
	fmt.Fprintf(os.Stderr, f, args[1:]...)
}
vendor/github.com/pierrec/lz4/v3/debug_stub.go | 7 (generated, vendored)
@@ -1,7 +0,0 @@
// +build !lz4debug

package lz4

const debugFlag = false

func debug(args ...interface{}) {}
vendor/github.com/pierrec/lz4/v3/errors.go | 30 (generated, vendored)
@@ -1,30 +0,0 @@
package lz4

import (
	"errors"
	"fmt"
	"os"
	rdebug "runtime/debug"
)

var (
	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
	// ErrInvalid is returned when reading an invalid LZ4 archive.
	ErrInvalid = errors.New("lz4: bad magic number")
	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
	ErrBlockDependency = errors.New("lz4: block dependency not supported")
	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
)

func recoverBlock(e *error) {
	if r := recover(); r != nil && *e == nil {
		if debugFlag {
			fmt.Fprintln(os.Stderr, r)
			rdebug.PrintStack()
		}
		*e = ErrInvalidSourceShortBuffer
	}
}
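For orientation only (not part of this commit): the v3 errors above are plain package-level sentinels, so a caller can match them directly. A minimal, hypothetical sketch:

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4/v3"
)

// classify is a hypothetical helper showing how the sentinel errors defined
// in errors.go can be compared directly, since they are plain errors.New values.
func classify(err error) string {
	switch err {
	case nil:
		return "ok"
	case lz4.ErrInvalid:
		return "not an LZ4 frame (bad magic number)"
	case lz4.ErrBlockDependency:
		return "frame uses block dependency, which v3 does not support"
	default:
		return fmt.Sprintf("other error: %v", err)
	}
}

func main() {
	fmt.Println(classify(nil))
}
```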
							
								
								
									
vendor/github.com/pierrec/lz4/v3/go.mod | 12 (generated, vendored)
@@ -1,12 +0,0 @@
module github.com/pierrec/lz4/v3

go 1.12

require (
	code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6
	github.com/frankban/quicktest v1.4.0
	github.com/onsi/ginkgo v1.8.0 // indirect
	github.com/onsi/gomega v1.5.0 // indirect
	github.com/pierrec/cmdflag v0.0.2
	github.com/schollz/progressbar/v2 v2.13.2
)
vendor/github.com/pierrec/lz4/v3/go.sum | 52 (generated, vendored)
@@ -1,52 +0,0 @@
code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6 h1:tW+ztA4A9UT9xnco5wUjW1oNi35k22eUEn9tNpPYVwE=
code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.4.0 h1:rCSCih1FnSWJEel/eub9wclBSqpF2F/PuvxUWGWnbO8=
github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pierrec/cmdflag v0.0.2 h1:ybjGJnPr/aURn2IKWjO49znx9N0DL6YfGsIxN0PYuVY=
github.com/pierrec/cmdflag v0.0.2/go.mod h1:a3zKGZ3cdQUfxjd0RGMLZr8xI3nvpJOB+m6o/1X5BmU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/schollz/progressbar/v2 v2.13.2 h1:3L9bP5KQOGEnFP8P5V8dz+U0yo5I29iY5Oa9s9EAwn0=
github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
vendor/github.com/pierrec/lz4/v3/lz4.go | 113 (generated, vendored)
@@ -1,113 +0,0 @@
// Package lz4 implements reading and writing lz4 compressed data (a frame),
// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
//
// Although the block level compression and decompression functions are exposed and are fully compatible
// with the lz4 block format definition, they are low level and should not be used directly.
// For a complete description of an lz4 compressed block, see:
// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
//
// See https://github.com/Cyan4973/lz4 for the reference C implementation.
//
package lz4

import "math/bits"

import "sync"

const (
	// Extension is the LZ4 frame file name extension
	Extension = ".lz4"
	// Version is the LZ4 frame format version
	Version = 1

	frameMagic     uint32 = 0x184D2204
	frameSkipMagic uint32 = 0x184D2A50

	// The following constants are used to setup the compression algorithm.
	minMatch            = 4  // the minimum size of the match sequence size (4 bytes)
	winSizeLog          = 16 // LZ4 64Kb window size limit
	winSize             = 1 << winSizeLog
	winMask             = winSize - 1 // 64Kb window of previous data for dependent blocks
	compressedBlockFlag = 1 << 31
	compressedBlockMask = compressedBlockFlag - 1

	// hashLog determines the size of the hash table used to quickly find a previous match position.
	// Its value influences the compression speed and memory usage, the lower the faster,
	// but at the expense of the compression ratio.
	// 16 seems to be the best compromise for fast compression.
	hashLog = 16
	htSize  = 1 << hashLog

	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
)

// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
const (
	blockSize64K = 1 << (16 + 2*iota)
	blockSize256K
	blockSize1M
	blockSize4M
)

var (
	// Keep a pool of buffers for each valid block sizes.
	bsMapValue = [...]*sync.Pool{
		newBufferPool(2 * blockSize64K),
		newBufferPool(2 * blockSize256K),
		newBufferPool(2 * blockSize1M),
		newBufferPool(2 * blockSize4M),
	}
)

// newBufferPool returns a pool for buffers of the given size.
func newBufferPool(size int) *sync.Pool {
	return &sync.Pool{
		New: func() interface{} {
			return make([]byte, size)
		},
	}
}

// getBuffer returns a buffer to its pool.
func getBuffer(size int) []byte {
	idx := blockSizeValueToIndex(size) - 4
	return bsMapValue[idx].Get().([]byte)
}

// putBuffer returns a buffer to its pool.
func putBuffer(size int, buf []byte) {
	if cap(buf) > 0 {
		idx := blockSizeValueToIndex(size) - 4
		bsMapValue[idx].Put(buf[:cap(buf)])
	}
}
func blockSizeIndexToValue(i byte) int {
	return 1 << (16 + 2*uint(i))
}
func isValidBlockSize(size int) bool {
	const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M

	return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
}
func blockSizeValueToIndex(size int) byte {
	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}

// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition
// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//
// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
// It is the caller's responsibility to check them if necessary.
type Header struct {
	BlockChecksum    bool   // Compressed blocks checksum flag.
	NoChecksum       bool   // Frame checksum flag.
	BlockMaxSize     int    // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
	Size             uint64 // Frame total size. It is _not_ computed by the Writer.
	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
	done             bool   // Header processed flag (Read or Write and checked).
}

func (h *Header) Reset() {
	h.done = false
}
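For orientation only (not part of this commit): the frame descriptor's block max size IDs 4-7 map to 64 KB, 256 KB, 1 MB and 4 MB through the power-of-four arithmetic in blockSizeIndexToValue/blockSizeValueToIndex above. A standalone sketch of that round trip, using the same formulas:

```go
package main

import (
	"fmt"
	"math/bits"
)

// idToValue mirrors blockSizeIndexToValue: index 0..3 -> 64KB, 256KB, 1MB, 4MB.
func idToValue(i byte) int { return 1 << (16 + 2*uint(i)) }

// valueToID mirrors blockSizeValueToIndex: size in bytes -> frame descriptor ID 4..7.
func valueToID(size int) byte { return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) }

func main() {
	for i := byte(0); i < 4; i++ {
		v := idToValue(i)
		fmt.Printf("frame ID %d -> %d bytes -> frame ID %d\n", i+4, v, valueToID(v))
	}
}
```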
							
								
								
									
vendor/github.com/pierrec/lz4/v3/lz4_go1.10.go | 29 (generated, vendored)
@@ -1,29 +0,0 @@
//+build go1.10

package lz4

import (
	"fmt"
	"strings"
)

func (h Header) String() string {
	var s strings.Builder

	s.WriteString(fmt.Sprintf("%T{", h))
	if h.BlockChecksum {
		s.WriteString("BlockChecksum: true ")
	}
	if h.NoChecksum {
		s.WriteString("NoChecksum: true ")
	}
	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
	}
	if l := h.CompressionLevel; l != 0 {
		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
	}
	s.WriteByte('}')

	return s.String()
}
vendor/github.com/pierrec/lz4/v3/lz4_notgo1.10.go | 29 (generated, vendored)
@@ -1,29 +0,0 @@
//+build !go1.10

package lz4

import (
	"bytes"
	"fmt"
)

func (h Header) String() string {
	var s bytes.Buffer

	s.WriteString(fmt.Sprintf("%T{", h))
	if h.BlockChecksum {
		s.WriteString("BlockChecksum: true ")
	}
	if h.NoChecksum {
		s.WriteString("NoChecksum: true ")
	}
	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
	}
	if l := h.CompressionLevel; l != 0 {
		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
	}
	s.WriteByte('}')

	return s.String()
}
vendor/github.com/pierrec/lz4/v3/reader.go | 335 (generated, vendored)
@@ -1,335 +0,0 @@
package lz4

import (
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/pierrec/lz4/v3/internal/xxh32"
)

// Reader implements the LZ4 frame decoder.
// The Header is set after the first call to Read().
// The Header may change between Read() calls in case of concatenated frames.
type Reader struct {
	Header
	// Handler called when a block has been successfully read.
	// It provides the number of bytes read.
	OnBlockDone func(size int)

	buf      [8]byte       // Scrap buffer.
	pos      int64         // Current position in src.
	src      io.Reader     // Source.
	zdata    []byte        // Compressed data.
	data     []byte        // Uncompressed data.
	idx      int           // Index of unread bytes into data.
	checksum xxh32.XXHZero // Frame hash.
	skip     int64         // Bytes to skip before next read.
	dpos     int64         // Position in dest
}

// NewReader returns a new LZ4 frame decoder.
// No access to the underlying io.Reader is performed.
func NewReader(src io.Reader) *Reader {
	r := &Reader{src: src}
	return r
}

// readHeader checks the frame magic number and parses the frame descriptoz.
// Skippable frames are supported even as a first frame although the LZ4
// specifications recommends skippable frames not to be used as first frames.
func (z *Reader) readHeader(first bool) error {
	defer z.checksum.Reset()

	buf := z.buf[:]
	for {
		magic, err := z.readUint32()
		if err != nil {
			z.pos += 4
			if !first && err == io.ErrUnexpectedEOF {
				return io.EOF
			}
			return err
		}
		if magic == frameMagic {
			break
		}
		if magic>>8 != frameSkipMagic>>8 {
			return ErrInvalid
		}
		skipSize, err := z.readUint32()
		if err != nil {
			return err
		}
		z.pos += 4
		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
		if err != nil {
			return err
		}
		z.pos += m
	}

	// Header.
	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
		return err
	}
	z.pos += 8

	b := buf[0]
	if v := b >> 6; v != Version {
		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
	}
	if b>>5&1 == 0 {
		return ErrBlockDependency
	}
	z.BlockChecksum = b>>4&1 > 0
	frameSize := b>>3&1 > 0
	z.NoChecksum = b>>2&1 == 0

	bmsID := buf[1] >> 4 & 0x7
	if bmsID < 4 || bmsID > 7 {
		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
	}
	bSize := blockSizeIndexToValue(bmsID - 4)
	z.BlockMaxSize = bSize

	// Allocate the compressed/uncompressed buffers.
	// The compressed buffer cannot exceed the uncompressed one.
	if n := 2 * bSize; cap(z.zdata) < n {
		z.zdata = make([]byte, n, n)
	}
	if debugFlag {
		debug("header block max size id=%d size=%d", bmsID, bSize)
	}
	z.zdata = z.zdata[:bSize]
	z.data = z.zdata[:cap(z.zdata)][bSize:]
	z.idx = len(z.data)

	_, _ = z.checksum.Write(buf[0:2])

	if frameSize {
		buf := buf[:8]
		if _, err := io.ReadFull(z.src, buf); err != nil {
			return err
		}
		z.Size = binary.LittleEndian.Uint64(buf)
		z.pos += 8
		_, _ = z.checksum.Write(buf)
	}

	// Header checksum.
	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
		return err
	}
	z.pos++
	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
	}

	z.Header.done = true
	if debugFlag {
		debug("header read: %v", z.Header)
	}

	return nil
}

// Read decompresses data from the underlying source into the supplied buffer.
//
// Since there can be multiple streams concatenated, Header values may
// change between calls to Read(). If that is the case, no data is actually read from
// the underlying io.Reader, to allow for potential input buffer resizing.
func (z *Reader) Read(buf []byte) (int, error) {
	if debugFlag {
		debug("Read buf len=%d", len(buf))
	}
	if !z.Header.done {
		if err := z.readHeader(true); err != nil {
			return 0, err
		}
		if debugFlag {
			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
		}
	}

	if len(buf) == 0 {
		return 0, nil
	}

	if z.idx == len(z.data) {
		// No data ready for reading, process the next block.
		if debugFlag {
			debug("reading block from writer")
		}
		// Reset uncompressed buffer
		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]

		// Block length: 0 = end of frame, highest bit set: uncompressed.
		bLen, err := z.readUint32()
		if err != nil {
			return 0, err
		}
		z.pos += 4

		if bLen == 0 {
			// End of frame reached.
			if !z.NoChecksum {
				// Validate the frame checksum.
				checksum, err := z.readUint32()
				if err != nil {
					return 0, err
				}
				if debugFlag {
					debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
				}
				z.pos += 4
				if h := z.checksum.Sum32(); checksum != h {
					return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
				}
			}

			// Get ready for the next concatenated frame and keep the position.
			pos := z.pos
			z.Reset(z.src)
			z.pos = pos

			// Since multiple frames can be concatenated, check for more.
			return 0, z.readHeader(false)
		}

		if debugFlag {
			debug("raw block size %d", bLen)
		}
		if bLen&compressedBlockFlag > 0 {
			// Uncompressed block.
			bLen &= compressedBlockMask
			if debugFlag {
				debug("uncompressed block size %d", bLen)
			}
			if int(bLen) > cap(z.data) {
				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
			}
			z.data = z.data[:bLen]
			if _, err := io.ReadFull(z.src, z.data); err != nil {
				return 0, err
			}
			z.pos += int64(bLen)
			if z.OnBlockDone != nil {
				z.OnBlockDone(int(bLen))
			}

			if z.BlockChecksum {
				checksum, err := z.readUint32()
				if err != nil {
					return 0, err
				}
				z.pos += 4

				if h := xxh32.ChecksumZero(z.data); h != checksum {
					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
				}
			}

		} else {
			// Compressed block.
			if debugFlag {
				debug("compressed block size %d", bLen)
			}
			if int(bLen) > cap(z.data) {
				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
			}
			zdata := z.zdata[:bLen]
			if _, err := io.ReadFull(z.src, zdata); err != nil {
				return 0, err
			}
			z.pos += int64(bLen)

			if z.BlockChecksum {
				checksum, err := z.readUint32()
				if err != nil {
					return 0, err
				}
				z.pos += 4

				if h := xxh32.ChecksumZero(zdata); h != checksum {
					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
				}
			}

			n, err := UncompressBlock(zdata, z.data)
			if err != nil {
				return 0, err
			}
			z.data = z.data[:n]
			if z.OnBlockDone != nil {
				z.OnBlockDone(n)
			}
		}

		if !z.NoChecksum {
			_, _ = z.checksum.Write(z.data)
			if debugFlag {
				debug("current frame checksum %x", z.checksum.Sum32())
			}
		}
		z.idx = 0
	}

	if z.skip > int64(len(z.data[z.idx:])) {
		z.skip -= int64(len(z.data[z.idx:]))
		z.dpos += int64(len(z.data[z.idx:]))
		z.idx = len(z.data)
		return 0, nil
	}

	z.idx += int(z.skip)
	z.dpos += z.skip
	z.skip = 0

	n := copy(buf, z.data[z.idx:])
	z.idx += n
	z.dpos += int64(n)
	if debugFlag {
		debug("copied %d bytes to input", n)
	}

	return n, nil
}

// Seek implements io.Seeker, but supports seeking forward from the current
// position only. Any other seek will return an error. Allows skipping output
// bytes which aren't needed, which in some scenarios is faster than reading
// and discarding them.
// Note this may cause future calls to Read() to read 0 bytes if all of the
// data they would have returned is skipped.
func (z *Reader) Seek(offset int64, whence int) (int64, error) {
	if offset < 0 || whence != io.SeekCurrent {
		return z.dpos + z.skip, ErrUnsupportedSeek
	}
	z.skip += offset
	return z.dpos + z.skip, nil
}

// Reset discards the Reader's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) {
	z.Header = Header{}
	z.pos = 0
	z.src = r
	z.zdata = z.zdata[:0]
	z.data = z.data[:0]
	z.idx = 0
	z.checksum.Reset()
}

// readUint32 reads an uint32 into the supplied buffer.
// The idea is to make use of the already allocated buffers avoiding additional allocations.
func (z *Reader) readUint32() (uint32, error) {
	buf := z.buf[:4]
	_, err := io.ReadFull(z.src, buf)
	x := binary.LittleEndian.Uint32(buf)
	return x, err
}
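For orientation only (not part of this commit): a minimal decompression sketch against the v3 Reader API removed above; the file names are placeholders.

```go
package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v3"
)

func main() {
	in, err := os.Open("data.lz4") // placeholder input file
	if err != nil {
		panic(err)
	}
	defer in.Close()

	out, err := os.Create("data") // placeholder output file
	if err != nil {
		panic(err)
	}
	defer out.Close()

	zr := lz4.NewReader(in) // the frame header is parsed on the first Read
	if _, err := io.Copy(out, zr); err != nil {
		panic(err)
	}
}
```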
							
								
								
									
vendor/github.com/pierrec/lz4/v3/writer.go | 409 (generated, vendored)
@@ -1,409 +0,0 @@
package lz4

import (
	"encoding/binary"
	"fmt"
	"io"
	"runtime"

	"github.com/pierrec/lz4/v3/internal/xxh32"
)

// zResult contains the results of compressing a block.
type zResult struct {
	size     uint32 // Block header
	data     []byte // Compressed data
	checksum uint32 // Data checksum
}

// Writer implements the LZ4 frame encoder.
type Writer struct {
	Header
	// Handler called when a block has been successfully written out.
	// It provides the number of bytes written.
	OnBlockDone func(size int)

	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
	dst       io.Writer     // Destination.
	checksum  xxh32.XXHZero // Frame checksum.
	data      []byte        // Data to be compressed + buffer for compressed data.
	idx       int           // Index into data.
	hashtable [winSize]int  // Hash table used in CompressBlock().

	// For concurrency.
	c   chan chan zResult // Channel for block compression goroutines and writer goroutine.
	err error             // Any error encountered while writing to the underlying destination.
}

// NewWriter returns a new LZ4 frame encoder.
// No access to the underlying io.Writer is performed.
// The supplied Header is checked at the first Write.
// It is ok to change it before the first Write but then not until a Reset() is performed.
func NewWriter(dst io.Writer) *Writer {
	z := new(Writer)
	z.Reset(dst)
	return z
}

// WithConcurrency sets the number of concurrent go routines used for compression.
// A negative value sets the concurrency to GOMAXPROCS.
func (z *Writer) WithConcurrency(n int) *Writer {
	switch {
	case n == 0 || n == 1:
		z.c = nil
		return z
	case n < 0:
		n = runtime.GOMAXPROCS(0)
	}
	z.c = make(chan chan zResult, n)
	// Writer goroutine managing concurrent block compression goroutines.
	go func() {
		// Process next block compression item.
		for c := range z.c {
			// Read the next compressed block result.
			// Waiting here ensures that the blocks are output in the order they were sent.
			// The incoming channel is always closed as it indicates to the caller that
			// the block has been processed.
			res := <-c
			n := len(res.data)
			if n == 0 {
				// Notify the block compression routine that we are done with its result.
				// This is used when a sentinel block is sent to terminate the compression.
				close(c)
				return
			}
			// Write the block.
			if err := z.writeUint32(res.size); err != nil && z.err == nil {
				z.err = err
			}
			if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
				z.err = err
			}
			if z.BlockChecksum {
				if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
					z.err = err
				}
			}
			if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
				// It is now safe to release the buffer as no longer in use by any goroutine.
				putBuffer(cap(res.data), res.data)
			}
			if h := z.OnBlockDone; h != nil {
				h(n)
			}
			close(c)
		}
	}()
	return z
}

// newBuffers instantiates new buffers which size matches the one in Header.
// The returned buffers are for decompression and compression respectively.
func (z *Writer) newBuffers() {
	bSize := z.Header.BlockMaxSize
	buf := getBuffer(bSize)
	z.data = buf[:bSize] // Uncompressed buffer is the first half.
}

// freeBuffers puts the writer's buffers back to the pool.
func (z *Writer) freeBuffers() {
	// Put the buffer back into the pool, if any.
	putBuffer(z.Header.BlockMaxSize, z.data)
	z.data = nil
}

// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
func (z *Writer) writeHeader() error {
	// Default to 4Mb if BlockMaxSize is not set.
	if z.Header.BlockMaxSize == 0 {
		z.Header.BlockMaxSize = blockSize4M
	}
	// The only option that needs to be validated.
	bSize := z.Header.BlockMaxSize
	if !isValidBlockSize(z.Header.BlockMaxSize) {
		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
	}
	// Allocate the compressed/uncompressed buffers.
	// The compressed buffer cannot exceed the uncompressed one.
	z.newBuffers()
	z.idx = 0

	// Size is optional.
	buf := z.buf[:]

	// Set the fixed size data: magic number, block max size and flags.
	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
	flg := byte(Version << 6)
	flg |= 1 << 5 // No block dependency.
	if z.Header.BlockChecksum {
		flg |= 1 << 4
	}
	if z.Header.Size > 0 {
		flg |= 1 << 3
	}
	if !z.Header.NoChecksum {
		flg |= 1 << 2
	}
	buf[4] = flg
	buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4

	// Current buffer size: magic(4) + flags(1) + block max size (1).
	n := 6
	// Optional items.
	if z.Header.Size > 0 {
		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
		n += 8
	}

	// The header checksum includes the flags, block max size and optional Size.
	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
	z.checksum.Reset()

	// Header ready, write it out.
	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
		return err
	}
	z.Header.done = true
	if debugFlag {
		debug("wrote header %v", z.Header)
	}

	return nil
}

// Write compresses data from the supplied buffer into the underlying io.Writer.
// Write does not return until the data has been written.
func (z *Writer) Write(buf []byte) (int, error) {
	if !z.Header.done {
		if err := z.writeHeader(); err != nil {
			return 0, err
		}
	}
	if debugFlag {
		debug("input buffer len=%d index=%d", len(buf), z.idx)
	}

	zn := len(z.data)
	var n int
	for len(buf) > 0 {
		if z.idx == 0 && len(buf) >= zn {
			// Avoid a copy as there is enough data for a block.
			if err := z.compressBlock(buf[:zn]); err != nil {
				return n, err
			}
			n += zn
			buf = buf[zn:]
			continue
		}
		// Accumulate the data to be compressed.
		m := copy(z.data[z.idx:], buf)
		n += m
		z.idx += m
		buf = buf[m:]
		if debugFlag {
			debug("%d bytes copied to buf, current index %d", n, z.idx)
		}

		if z.idx < len(z.data) {
			// Buffer not filled.
			if debugFlag {
				debug("need more data for compression")
			}
			return n, nil
		}

		// Buffer full.
		if err := z.compressBlock(z.data); err != nil {
			return n, err
		}
		z.idx = 0
	}

	return n, nil
}

// compressBlock compresses a block.
func (z *Writer) compressBlock(data []byte) error {
	if !z.NoChecksum {
		_, _ = z.checksum.Write(data)
	}

	if z.c != nil {
		c := make(chan zResult)
		z.c <- c // Send now to guarantee order
		go writerCompressBlock(c, z.Header, data)
		return nil
	}

	zdata := z.data[z.Header.BlockMaxSize:cap(z.data)]
	// The compressed block size cannot exceed the input's.
	var zn int

	if level := z.Header.CompressionLevel; level != 0 {
		zn, _ = CompressBlockHC(data, zdata, level)
	} else {
		zn, _ = CompressBlock(data, zdata, z.hashtable[:])
	}

	var bLen uint32
	if debugFlag {
		debug("block compression %d => %d", len(data), zn)
	}
	if zn > 0 && zn < len(data) {
		// Compressible and compressed size smaller than uncompressed: ok!
		bLen = uint32(zn)
		zdata = zdata[:zn]
	} else {
		// Uncompressed block.
		bLen = uint32(len(data)) | compressedBlockFlag
		zdata = data
	}
	if debugFlag {
		debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
	}

	// Write the block.
	if err := z.writeUint32(bLen); err != nil {
		return err
	}
	written, err := z.dst.Write(zdata)
	if err != nil {
		return err
	}
	if h := z.OnBlockDone; h != nil {
		h(written)
	}

	if !z.BlockChecksum {
		if debugFlag {
			debug("current frame checksum %x", z.checksum.Sum32())
		}
		return nil
	}
	checksum := xxh32.ChecksumZero(zdata)
	if debugFlag {
		debug("block checksum %x", checksum)
		defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
	}
	return z.writeUint32(checksum)
}

// Flush flushes any pending compressed data to the underlying writer.
// Flush does not return until the data has been written.
// If the underlying writer returns an error, Flush returns that error.
func (z *Writer) Flush() error {
	if debugFlag {
		debug("flush with index %d", z.idx)
	}
	if z.idx == 0 {
		return nil
	}

	data := z.data[:z.idx]
	z.idx = 0
	if z.c == nil {
		return z.compressBlock(data)
	}
	if !z.NoChecksum {
		_, _ = z.checksum.Write(data)
	}
	c := make(chan zResult)
	z.c <- c
	writerCompressBlock(c, z.Header, data)
	return nil
}

func (z *Writer) close() error {
	if z.c == nil {
		return nil
	}
	// Send a sentinel block (no data to compress) to terminate the writer main goroutine.
	c := make(chan zResult)
	z.c <- c
	c <- zResult{}
	// Wait for the main goroutine to complete.
	<-c
	// At this point the main goroutine has shut down or is about to return.
	z.c = nil
	return z.err
}

// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if !z.Header.done {
		if err := z.writeHeader(); err != nil {
			return err
		}
	}
	if err := z.Flush(); err != nil {
		return err
	}
	if err := z.close(); err != nil {
		return err
	}
	z.freeBuffers()

	if debugFlag {
		debug("writing last empty block")
	}
	if err := z.writeUint32(0); err != nil {
		return err
	}
	if z.NoChecksum {
		return nil
	}
	checksum := z.checksum.Sum32()
	if debugFlag {
		debug("stream checksum %x", checksum)
	}
	return z.writeUint32(checksum)
}

// Reset clears the state of the Writer z such that it is equivalent to its
// initial state from NewWriter, but instead writing to w.
// No access to the underlying io.Writer is performed.
func (z *Writer) Reset(w io.Writer) {
	n := cap(z.c)
	_ = z.close()
	z.freeBuffers()
	z.Header.Reset()
	z.dst = w
	z.checksum.Reset()
	z.idx = 0
	z.err = nil
	z.WithConcurrency(n)
}

// writeUint32 writes a uint32 to the underlying writer.
func (z *Writer) writeUint32(x uint32) error {
	buf := z.buf[:4]
	binary.LittleEndian.PutUint32(buf, x)
	_, err := z.dst.Write(buf)
	return err
}

// writerCompressBlock compresses data into a pooled buffer and writes its result
// out to the input channel.
func writerCompressBlock(c chan zResult, header Header, data []byte) {
	zdata := getBuffer(header.BlockMaxSize)
	// The compressed block size cannot exceed the input's.
	var zn int
	if level := header.CompressionLevel; level != 0 {
		zn, _ = CompressBlockHC(data, zdata, level)
	} else {
		var hashTable [winSize]int
		zn, _ = CompressBlock(data, zdata, hashTable[:])
	}
	var res zResult
	if zn > 0 && zn < len(data) {
		res.size = uint32(zn)
		res.data = zdata[:zn]
	} else {
		res.size = uint32(len(data)) | compressedBlockFlag
		res.data = data
	}
	if header.BlockChecksum {
		res.checksum = xxh32.ChecksumZero(res.data)
	}
	c <- res
}
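For orientation only (not part of this commit): a minimal compression sketch against the v3 Writer API removed above; the file names and chosen block size are placeholders.

```go
package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v3"
)

func main() {
	in, err := os.Open("data") // placeholder input file
	if err != nil {
		panic(err)
	}
	defer in.Close()

	out, err := os.Create("data.lz4") // placeholder output file
	if err != nil {
		panic(err)
	}
	defer out.Close()

	zw := lz4.NewWriter(out).WithConcurrency(-1) // negative = GOMAXPROCS workers
	zw.Header.BlockMaxSize = 256 << 10           // one of 64KB, 256KB, 1MB, 4MB
	if _, err := io.Copy(zw, in); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil { // flushes and writes the end-of-frame marker
		panic(err)
	}
}
```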
@@ -1,19 +1,14 @@
language: go

env:
  - GO111MODULE=on
  - GO111MODULE=off

go:
  - 1.9.x
  - 1.10.x
  - 1.11.x
  - 1.12.x
  - master
  - 1.13.x
  - 1.14.x

matrix:
 fast_finish: true
 allow_failures:
   - go: master

sudo: false
vendor/github.com/pierrec/lz4/v3/LICENSE → vendor/github.com/pierrec/lz4/v4/LICENSE | 0 (generated, vendored)
@@ -15,7 +15,7 @@ The implementation is based on the reference C [one](https://github.com/lz4/lz4)
Assuming you have the go toolchain installed:

```
go get github.com/pierrec/lz4/v3
go get github.com/pierrec/lz4
```

There is a command line interface tool to compress and decompress LZ4 files.
vendor/github.com/pierrec/lz4/v4/go.mod | 3 (generated, vendored, new file)
@@ -0,0 +1,3 @@
module github.com/pierrec/lz4/v4

go 1.14
vendor/github.com/pierrec/lz4/v4/go.sum | 3 (generated, vendored, new file)
@@ -0,0 +1,3 @@
github.com/pierrec/lz4 v1.0.1 h1:w6GMGWSsCI04fTM8wQRdnW74MuJISakuUU0onU0TYB4=
github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@@ -1,28 +1,46 @@
package lz4
package lz4block

import (
	"encoding/binary"
	"math/bits"
	"sync"

	"github.com/pierrec/lz4/v4/internal/lz4errors"
)

const (
	// The following constants are used to setup the compression algorithm.
	minMatch   = 4  // the minimum size of the match sequence size (4 bytes)
	winSizeLog = 16 // LZ4 64Kb window size limit
	winSize    = 1 << winSizeLog
	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks

	// hashLog determines the size of the hash table used to quickly find a previous match position.
	// Its value influences the compression speed and memory usage, the lower the faster,
	// but at the expense of the compression ratio.
	// 16 seems to be the best compromise for fast compression.
	hashLog = 16
	htSize  = 1 << hashLog

	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
)

func recoverBlock(e *error) {
	if r := recover(); r != nil && *e == nil {
		*e = lz4errors.ErrInvalidSourceShortBuffer
	}
}

// blockHash hashes the lower 6 bytes into a value < htSize.
func blockHash(x uint64) uint32 {
	const prime6bytes = 227718039650203
	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
}

// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
func CompressBlockBound(n int) int {
	return n + n/255 + 16
}

// UncompressBlock uncompresses the source buffer into the destination one,
// and returns the uncompressed size.
//
// The destination buffer must be sized appropriately.
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlock(src, dst []byte) (int, error) {
	if len(src) == 0 {
		return 0, nil
@@ -30,24 +48,57 @@ func UncompressBlock(src, dst []byte) (int, error) {
	if di := decodeBlock(dst, src); di >= 0 {
		return di, nil
	}
	return 0, ErrInvalidSourceShortBuffer
	return 0, lz4errors.ErrInvalidSourceShortBuffer
}

// CompressBlock compresses the source buffer into the destination one.
// This is the fast version of LZ4 compression and also the default one.
//
// The argument hashTable is scratch space for a hash table used by the
// compressor. If provided, it should have length at least 1<<16. If it is
// shorter (or nil), CompressBlock allocates its own hash table.
//
// The size of the compressed data is returned.
//
// If the destination buffer size is lower than CompressBlockBound and
// the compressed size is 0 and no error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
	defer recoverBlock(&err)
type Compressor struct {
	// Offsets are at most 64kiB, so we can store only the lower 16 bits of
	// match positions: effectively, an offset from some 64kiB block boundary.
	//
	// When we retrieve such an offset, we interpret it as relative to the last
	// block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
	// depending on which of these is inside the current window. If a table
	// entry was generated more than 64kiB back in the input, we find out by
	// inspecting the input stream.
	table [htSize]uint16

	needsReset bool
}

// Get returns the position of a presumptive match for the hash h.
// The match may be a false positive due to a hash collision or an old entry.
// If si < winSize, the return value may be negative.
func (c *Compressor) get(h uint32, si int) int {
	h &= htSize - 1
	i := int(c.table[h])
	i += si &^ winMask
	if i >= si {
		// Try previous 64kiB block (negative when in first block).
		i -= winSize
	}
	return i
}

func (c *Compressor) put(h uint32, si int) {
	h &= htSize - 1
	c.table[h] = uint16(si)
}

var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}

func CompressBlock(src, dst []byte) (int, error) {
	c := compressorPool.Get().(*Compressor)
	n, err := c.CompressBlock(src, dst)
	compressorPool.Put(c)
	return n, err
}

func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
	if c.needsReset {
		// Zero out reused table to avoid non-deterministic output (issue #65).
		c.table = [htSize]uint16{}
	}
	c.needsReset = true // Only false on first call.

	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
	isNotCompressible := len(dst) < CompressBlockBound(len(src))
@@ -56,14 +107,6 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
	// This significantly speeds up incompressible data and usually has very small impact on compression.
	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
	const adaptSkipLog = 7
	if len(hashTable) < htSize {
		htIface := htPool.Get()
		defer htPool.Put(htIface)
		hashTable = (*(htIface).(*[htSize]int))[:]
	}
	// Prove to the compiler the table has at least htSize elements.
	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
	hashTable = hashTable[:htSize]

	// si: Current position of the search.
	// anchor: Position of the current literals.
@@ -82,33 +125,30 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {

		// We check a match at s, s+1 and s+2 and pick the first one we get.
		// Checking 3 only requires us to load the source one.
		ref := hashTable[h]
		ref2 := hashTable[h2]
		hashTable[h] = si
		hashTable[h2] = si + 1
		ref := c.get(h, si)
		ref2 := c.get(h2, si)
		c.put(h, si)
		c.put(h2, si+1)

		offset := si - ref

		// If offset <= 0 we got an old entry in the hash table.
		if offset <= 0 || offset >= winSize || // Out of window.
			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
		if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
			// No match. Start calculating another hash.
			// The processor can usually do this out-of-order.
			h = blockHash(match >> 16)
			ref = hashTable[h]
			ref3 := c.get(h, si+2)

			// Check the second match at si+1
			si += 1
			offset = si - ref2

			if offset <= 0 || offset >= winSize ||
				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
			if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
				// No match. Check the third match at si+2
				si += 1
				offset = si - ref
				hashTable[h] = si
				offset = si - ref3
				c.put(h, si)

				if offset <= 0 || offset >= winSize ||
					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
				if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
					// Skip one extra byte (at si+3) before we check 3 matches again.
					si += 2 + (si-anchor)>>adaptSkipLog
					continue
@@ -169,20 +209,28 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
		di++

		// Literals.
		if di+lLen > len(dst) {
			return 0, lz4errors.ErrInvalidSourceShortBuffer
		}
		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
		di += lLen + 2
		anchor = si

		// Encode offset.
		_ = dst[di] // Bound check elimination.
		if di > len(dst) {
			return 0, lz4errors.ErrInvalidSourceShortBuffer
		}
		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)

		// Encode match length part 2.
		if mLen >= 0xF {
			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
			for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
				dst[di] = 0xFF
				di++
			}
			if di >= len(dst) {
				return 0, lz4errors.ErrInvalidSourceShortBuffer
			}
			dst[di] = byte(mLen)
			di++
		}
@@ -192,7 +240,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
		}
		// Hash match end-2
		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
		hashTable[h] = si - 2
		c.put(h, si-2)
	}

lastLiterals:
@@ -202,16 +250,22 @@ lastLiterals:
	}

	// Last literals.
	if di >= len(dst) {
		return 0, lz4errors.ErrInvalidSourceShortBuffer
	}
	lLen := len(src) - anchor
	if lLen < 0xF {
		dst[di] = byte(lLen << 4)
	} else {
		dst[di] = 0xF0
		di++
		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
		for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
			dst[di] = 0xFF
			di++
		}
		if di >= len(dst) {
			return 0, lz4errors.ErrInvalidSourceShortBuffer
		}
		dst[di] = byte(lLen)
	}
	di++
@@ -221,35 +275,43 @@ lastLiterals:
		// Incompressible.
		return 0, nil
	}
	if di+len(src)-anchor > len(dst) {
		return 0, lz4errors.ErrInvalidSourceShortBuffer
	}
	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
	return di, nil
}

// Pool of hash tables for CompressBlock.
var htPool = sync.Pool{
	New: func() interface{} {
		return new([htSize]int)
	},
}

// blockHash hashes 4 bytes into a value < winSize.
func blockHashHC(x uint32) uint32 {
	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
	return x * hasher >> (32 - winSizeLog)
}

// CompressBlockHC compresses the source buffer src into the destination dst
// with max search depth (use 0 or negative value for no max).
//
// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
//
// The size of the compressed data is returned.
//
// If the destination buffer size is lower than CompressBlockBound and
// the compressed size is 0 and no error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
type CompressorHC struct {
	// hashTable: stores the last position found for a given hash
	// chainTable: stores previous positions for a given hash
	hashTable, chainTable [htSize]int
	needsReset            bool
}

var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}

func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
	c := compressorHCPool.Get().(*CompressorHC)
	n, err := c.CompressBlock(src, dst, depth)
	compressorHCPool.Put(c)
	return n, err
}

func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
	if c.needsReset {
		// Zero out reused table to avoid non-deterministic output (issue #65).
		c.hashTable = [htSize]int{}
		c.chainTable = [htSize]int{}
	}
	c.needsReset = true // Only false on first call.

	defer recoverBlock(&err)

	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
@@ -261,20 +323,15 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
	const adaptSkipLog = 7

	var si, di, anchor int

	// hashTable: stores the last position found for a given hash
	// chainTable: stores previous positions for a given hash
	var hashTable, chainTable [winSize]int

	if depth <= 0 {
		depth = winSize
	}

	sn := len(src) - mfLimit
	if sn <= 0 {
		goto lastLiterals
	}

	if depth == 0 {
		depth = winSize
	}

	for si < sn {
		// Hash the next 4 bytes (sequence).
		match := binary.LittleEndian.Uint32(src[si:])
@@ -283,7 +340,7 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
		// Follow the chain until out of window and give the longest match.
		mLen := 0
		offset := 0
		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
		for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
			// must match to improve on the match length.
			if src[next+mLen] != src[si+mLen] {
@@ -309,10 +366,9 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
			mLen = ml
			offset = si - next
			// Try another previous position with the same hash.
			try--
		}
		chainTable[si&winMask] = hashTable[h]
		hashTable[h] = si
		c.chainTable[si&winMask] = c.hashTable[h]
		c.hashTable[h] = si

		// No match found.
		if mLen == 0 {
@@ -331,8 +387,8 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
			match >>= 8
			match |= uint32(src[si+3]) << 24
			h := blockHashHC(match)
			chainTable[si&winMask] = hashTable[h]
			hashTable[h] = si
			c.chainTable[si&winMask] = c.hashTable[h]
			c.hashTable[h] = si
			si++
		}

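The rewrite above replaces the caller-supplied hashTable (and the old htPool) with a pooled Compressor whose table wipe is deferred until the value is actually reused via the needsReset flag. A stripped-down sketch of that reuse pattern, generic code rather than the library's own:

```go
package main

import (
	"fmt"
	"sync"
)

// worker mimics the Compressor reuse pattern above: per-call state lives in a
// pooled value, and the table is only zeroed when the value is being reused.
type worker struct {
	table      [1 << 8]uint16
	needsReset bool
}

var workerPool = sync.Pool{New: func() interface{} { return new(worker) }}

func (w *worker) process(src []byte) int {
	if w.needsReset {
		// Zero out the reused table so output does not depend on prior inputs.
		w.table = [1 << 8]uint16{}
	}
	w.needsReset = true // only false on the first call for a fresh value

	for i, b := range src {
		w.table[b] = uint16(i)
	}
	return len(src)
}

func process(src []byte) int {
	w := workerPool.Get().(*worker)
	defer workerPool.Put(w)
	return w.process(src)
}

func main() {
	fmt.Println(process([]byte("pooled state, deterministic output")))
}
```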
							
								
								
									
88 vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go generated vendored Normal file
@@ -0,0 +1,88 @@
// Package lz4block provides LZ4 BlockSize types and pools of buffers.
package lz4block

import "sync"

const (
	Block64Kb uint32 = 1 << (16 + iota*2)
	Block256Kb
	Block1Mb
	Block4Mb
	Block8Mb        = 2 * Block4Mb
	legacyBlockSize = Block8Mb + Block8Mb/255 + 16 // CompressBound(Block8Mb)
)

var (
	BlockPool64K  = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
	BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
	BlockPool1M   = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
	BlockPool4M   = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
	BlockPool8M   = sync.Pool{New: func() interface{} { return make([]byte, legacyBlockSize) }}
)

func Index(b uint32) BlockSizeIndex {
	switch b {
	case Block64Kb:
		return 4
	case Block256Kb:
		return 5
	case Block1Mb:
		return 6
	case Block4Mb:
		return 7
	case Block8Mb: // only valid in legacy mode
		return 3
	}
	return 0
}

func IsValid(b uint32) bool {
	return Index(b) > 0
}

type BlockSizeIndex uint8

func (b BlockSizeIndex) IsValid() bool {
	switch b {
	case 4, 5, 6, 7:
		return true
	}
	return false
}

func (b BlockSizeIndex) Get() []byte {
	var buf interface{}
	switch b {
	case 4:
		buf = BlockPool64K.Get()
	case 5:
		buf = BlockPool256K.Get()
	case 6:
		buf = BlockPool1M.Get()
	case 7:
		buf = BlockPool4M.Get()
	case 3:
		buf = BlockPool8M.Get()
	}
	return buf.([]byte)
}

func Put(buf []byte) {
	// Safeguard: do not allow invalid buffers.
	switch c := cap(buf); uint32(c) {
	case Block64Kb:
		BlockPool64K.Put(buf[:c])
	case Block256Kb:
		BlockPool256K.Put(buf[:c])
	case Block1Mb:
		BlockPool1M.Put(buf[:c])
	case Block4Mb:
		BlockPool4M.Put(buf[:c])
	case legacyBlockSize:
		BlockPool8M.Put(buf[:c])
	}
}

type CompressionLevel uint32

const Fast CompressionLevel = 0
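The per-size buffer pools above let the frame reader and writer recycle block buffers instead of allocating one per block. A minimal generic sketch of the same idea, with a single 64 KiB size class and the same capacity safeguard on return (assumed names, not library code):

```go
package main

import (
	"fmt"
	"sync"
)

// Buffers are handed out by size class and only returned to the pool whose
// capacity matches, so steady-state streaming does not allocate.
var pool64K = sync.Pool{New: func() interface{} { return make([]byte, 64<<10) }}

func getBlock() []byte { return pool64K.Get().([]byte) }

func putBlock(b []byte) {
	if cap(b) == 64<<10 { // safeguard, mirroring lz4block.Put
		pool64K.Put(b[:cap(b)])
	}
}

func main() {
	b := getBlock()
	n := copy(b, "reused buffer")
	fmt.Println(n, len(b))
	putBlock(b)
}
```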
@@ -26,6 +26,8 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56

	MOVQ src_base+24(FP), SI
	MOVQ src_len+32(FP), R9
	CMPQ R9, $0
	JE   err_corrupt
	ADDQ SI, R9

	// shortcut ends
@@ -109,8 +111,7 @@ loop:
	MOVW 16(AX), BX
	MOVW BX, 16(DI)

	ADDQ $4, DI // minmatch
	ADDQ CX, DI
	LEAQ 4(DI)(CX*1), DI // minmatch

	// shortcut complete, load next token
	JMP loop
@@ -128,8 +129,7 @@ lit_len_loop:
	JNE lit_len_finalise

	// bounds check src[si+1]
	MOVQ SI, AX
	ADDQ $1, AX
	LEAQ 1(SI), AX
	CMPQ AX, R9
	JGT err_short_buf

@@ -147,13 +147,11 @@ lit_len_finalise:

copy_literal:
	// bounds check src and dst
	MOVQ SI, AX
	ADDQ CX, AX
	LEAQ (SI)(CX*1), AX
	CMPQ AX, R9
	JGT err_short_buf

	MOVQ DI, AX
	ADDQ CX, AX
	LEAQ (DI)(CX*1), AX
	CMPQ AX, R8
	JGT err_short_buf

@@ -219,8 +217,7 @@ offset:
	// free up DX to use for offset
	MOVQ DX, CX

	MOVQ SI, AX
	ADDQ $2, AX
	LEAQ 2(SI), AX
	CMPQ AX, R9
	JGT err_short_buf

@@ -247,8 +244,7 @@ match_len_loop:
	JNE match_len_finalise

	// bounds check src[si+1]
	MOVQ SI, AX
	ADDQ $1, AX
	LEAQ 1(SI), AX
	CMPQ AX, R9
	JGT err_short_buf

@@ -269,8 +265,7 @@ copy_match:

	// check we have match_len bytes left in dst
	// di+match_len < len(dst)
	MOVQ DI, AX
	ADDQ CX, AX
	LEAQ (DI)(CX*1), AX
	CMPQ AX, R8
	JGT err_short_buf

@@ -286,8 +281,7 @@ copy_match:
	JLT err_short_buf

	// if offset + match_len < di
	MOVQ BX, AX
	ADDQ CX, AX
	LEAQ (BX)(CX*1), AX
	CMPQ DI, AX
	JGT copy_interior_match

							
								
								
									
201 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s generated vendored Normal file
@@ -0,0 +1,201 @@
// +build gc
// +build !noasm

#include "textflag.h"

// Register allocation.
#define dst	R0
#define dstorig	R1
#define src	R2
#define dstend	R3
#define srcend	R4
#define match	R5	// Match address.
#define token	R6
#define len	R7	// Literal and match lengths.
#define offset	R6	// Match offset; overlaps with token.
#define tmp1	R8
#define tmp2	R9
#define tmp3	R12

#define minMatch	$4

// func decodeBlock(dst, src []byte) int
TEXT ·decodeBlock(SB), NOFRAME|NOSPLIT, $-4-28
	MOVW dst_base +0(FP), dst
	MOVW dst_len  +4(FP), dstend
	MOVW src_base+12(FP), src
	MOVW src_len +16(FP), srcend

	CMP $0, srcend
	BEQ shortSrc

	ADD dst, dstend
	ADD src, srcend

	MOVW dst, dstorig

loop:
	// Read token. Extract literal length.
	MOVBU.P 1(src), token
	MOVW    token >> 4, len
	CMP     $15, len
	BNE     readLitlenDone

readLitlenLoop:
	CMP     src, srcend
	BEQ     shortSrc
	MOVBU.P 1(src), tmp1
	ADD     tmp1, len
	CMP     $255, tmp1
	BEQ     readLitlenLoop

readLitlenDone:
	CMP $0, len
	BEQ copyLiteralDone

	// Bounds check dst+len and src+len.
	ADD    dst, len, tmp1
	CMP    dstend, tmp1
	//BHI  shortDst	// Uncomment for distinct error codes.
	ADD    src, len, tmp2
	CMP.LS srcend, tmp2
	BHI    shortSrc

	// Copy literal.
	CMP $4, len
	BLO copyLiteralFinish

	// Copy 0-3 bytes until src is aligned.
	TST        $1, src
	MOVBU.NE.P 1(src), tmp1
	MOVB.NE.P  tmp1, 1(dst)
	SUB.NE     $1, len

	TST        $2, src
	MOVHU.NE.P 2(src), tmp2
	MOVB.NE.P  tmp2, 1(dst)
	MOVW.NE    tmp2 >> 8, tmp1
	MOVB.NE.P  tmp1, 1(dst)
	SUB.NE     $2, len

	B copyLiteralLoopCond

copyLiteralLoop:
	// Aligned load, unaligned write.
	MOVW.P 4(src), tmp1
	MOVW   tmp1 >>  8, tmp2
	MOVB   tmp2, 1(dst)
	MOVW   tmp1 >> 16, tmp3
	MOVB   tmp3, 2(dst)
	MOVW   tmp1 >> 24, tmp2
	MOVB   tmp2, 3(dst)
	MOVB.P tmp1, 4(dst)
copyLiteralLoopCond:
	// Loop until len-4 < 0.
	SUB.S  $4, len
	BPL    copyLiteralLoop

	// Restore len, which is now negative.
	ADD $4, len

copyLiteralFinish:
	// Copy remaining 0-3 bytes.
	TST        $2, len
	MOVHU.NE.P 2(src), tmp2
	MOVB.NE.P  tmp2, 1(dst)
	MOVW.NE    tmp2 >> 8, tmp1
	MOVB.NE.P  tmp1, 1(dst)
	TST        $1, len
	MOVBU.NE.P 1(src), tmp1
	MOVB.NE.P  tmp1, 1(dst)

copyLiteralDone:
	CMP src, srcend
	BEQ end

	// Initial part of match length.
	// This frees up the token register for reuse as offset.
	AND $15, token, len

	// Read offset.
	ADD   $2, src
	CMP   srcend, src
	BHI   shortSrc
	MOVBU -2(src), offset
	MOVBU -1(src), tmp1
	ORR   tmp1 << 8, offset
	CMP   $0, offset
	BEQ   corrupt

	// Read rest of match length.
	CMP $15, len
	BNE readMatchlenDone

readMatchlenLoop:
	CMP     src, srcend
	BEQ     shortSrc
	MOVBU.P 1(src), tmp1
	ADD     tmp1, len
	CMP     $255, tmp1
	BEQ     readMatchlenLoop

readMatchlenDone:
	ADD minMatch, len

	// Bounds check dst+len and match = dst-offset.
	ADD    dst, len, tmp1
	CMP    dstend, tmp1
	//BHI  shortDst	// Uncomment for distinct error codes.
	SUB    offset, dst, match
	CMP.LS match, dstorig
	BHI    corrupt

	// If the offset is at least four (len is, because of minMatch),
	// do a four-way unrolled byte copy loop. Using MOVD instead of four
	// byte loads is much faster, but to remain portable we'd have to
	// align match first, which in turn is too expensive.
	CMP $4, offset
	BLO copyMatch

	SUB $4, len
copyMatch4:
	MOVBU.P 4(match), tmp1
	MOVB.P  tmp1, 4(dst)
	MOVBU   -3(match), tmp2
	MOVB    tmp2, -3(dst)
	MOVBU   -2(match), tmp3
	MOVB    tmp3, -2(dst)
	MOVBU   -1(match), tmp1
	MOVB    tmp1, -1(dst)
	SUB.S   $4, len
	BPL     copyMatch4

	// Restore len, which is now negative.
	ADD.S $4, len
	BEQ   copyMatchDone

copyMatch:
	// Simple byte-at-a-time copy.
	SUB.S   $1, len
	MOVBU.P 1(match), tmp2
	MOVB.P  tmp2, 1(dst)
	BNE     copyMatch

copyMatchDone:
	CMP src, srcend
	BNE loop

end:
	SUB  dstorig, dst, tmp1
	MOVW tmp1, ret+24(FP)
	RET

	// The three error cases have distinct labels so we can put different
	// return codes here when debugging, or if the error returns need to
	// be changed.
shortDst:
shortSrc:
corrupt:
	MOVW $-1, tmp1
	MOVW tmp1, ret+24(FP)
	RET
@@ -1,8 +1,9 @@
// +build amd64 arm
// +build !appengine
// +build gc
// +build !noasm

package lz4
package lz4block

//go:noescape
func decodeBlock(dst, src []byte) int
@@ -1,6 +1,6 @@
// +build !amd64 appengine !gc noasm
// +build !amd64,!arm appengine !gc noasm

package lz4
package lz4block

func decodeBlock(dst, src []byte) (ret int) {
	const hasError = -2
@@ -10,16 +10,16 @@ func decodeBlock(dst, src []byte) (ret int) {
		}
	}()

	var si, di int
	var si, di uint
	for {
		// Literals and match lengths (token).
		b := int(src[si])
		b := uint(src[si])
		si++

		// Literals.
		if lLen := b >> 4; lLen > 0 {
			switch {
			case lLen < 0xF && si+16 < len(src):
			case lLen < 0xF && si+16 < uint(len(src)):
				// Shortcut 1
				// if we have enough room in src and dst, and the literals length
				// is small enough (0..14) then copy all 16 bytes, even if not all
@@ -32,13 +32,13 @@ func decodeBlock(dst, src []byte) (ret int) {
					// if the match length (4..18) fits within the literals, then copy
					// all 18 bytes, even if not all are part of the literals.
					mLen += 4
					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
					if offset := uint(src[si]) | uint(src[si+1])<<8; mLen <= offset {
						i := di - offset
						end := i + 18
						if end > len(dst) {
						if end > uint(len(dst)) {
							// The remaining buffer may not hold 18 bytes.
							// See https://github.com/pierrec/lz4/issues/51.
							end = len(dst)
							end = uint(len(dst))
						}
						copy(dst[di:], dst[i:end])
						si += 2
@@ -51,7 +51,7 @@ func decodeBlock(dst, src []byte) (ret int) {
					lLen += 0xFF
					si++
				}
				lLen += int(src[si])
				lLen += uint(src[si])
				si++
				fallthrough
			default:
@@ -60,11 +60,13 @@ func decodeBlock(dst, src []byte) (ret int) {
				di += lLen
			}
		}
		if si >= len(src) {
			return di
		if si == uint(len(src)) {
			return int(di)
		} else if si > uint(len(src)) {
			return hasError
		}

		offset := int(src[si]) | int(src[si+1])<<8
		offset := uint(src[si]) | uint(src[si+1])<<8
		if offset == 0 {
			return hasError
		}
@@ -77,7 +79,7 @@ func decodeBlock(dst, src []byte) (ret int) {
			mLen += 0xFF
			si++
		}
		mLen += int(src[si])
		mLen += uint(src[si])
		si++
	}
		mLen += minMatch
@@ -93,6 +95,6 @@ func decodeBlock(dst, src []byte) (ret int) {
			di += bytesToCopy
			mLen -= bytesToCopy
		}
		di += copy(dst[di:di+mLen], expanded[:mLen])
		di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
	}
}
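The pure-Go decoder now walks the block with unsigned indices, so an overshoot caused by corrupt input is caught by the `si > uint(len(src))` comparison, and any remaining out-of-range indexing panics are converted to the hasError sentinel by the deferred recover. A small hedged sketch of that recover-into-sentinel pattern (illustrative only, not the library's decoder):

```go
package main

import "fmt"

// decodeLen walks its input with unchecked indexing and converts any
// out-of-range panic into a sentinel return value via a deferred recover,
// mirroring the error handling used by decodeBlock above.
func decodeLen(src []byte) (ret int) {
	const hasError = -2
	defer func() {
		if recover() != nil {
			ret = hasError
		}
	}()

	var si, n uint
	for si < uint(len(src)) {
		b := uint(src[si]) // would panic (and be recovered) on a corrupted index
		si += b + 1
		n++
	}
	if si > uint(len(src)) {
		return hasError // overshoot detected without signed arithmetic
	}
	return int(n)
}

func main() {
	fmt.Println(decodeLen([]byte{1, 0, 2, 0, 0})) // 2
	fmt.Println(decodeLen([]byte{250}))           // -2
}
```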
							
								
								
									
19 vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go generated vendored Normal file
@@ -0,0 +1,19 @@
package lz4errors

type Error string

func (e Error) Error() string { return string(e) }

const (
	ErrInvalidSourceShortBuffer      Error = "lz4: invalid source or destination buffer too short"
	ErrInvalidFrame                  Error = "lz4: bad magic number"
	ErrInternalUnhandledState        Error = "lz4: unhandled state"
	ErrInvalidHeaderChecksum         Error = "lz4: invalid header checksum"
	ErrInvalidBlockChecksum          Error = "lz4: invalid block checksum"
	ErrInvalidFrameChecksum          Error = "lz4: invalid frame checksum"
	ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
	ErrOptionClosedOrError           Error = "lz4: cannot apply options on closed or in error object"
	ErrOptionInvalidBlockSize        Error = "lz4: invalid block size"
	ErrOptionNotApplicable           Error = "lz4: option not applicable"
	ErrWriterNotClosed               Error = "lz4: writer not closed"
)
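Because the sentinels are constants of a string-based type, callers can compare them directly, and the `%w`-wrapped checksum errors built in block.go and frame.go still match with errors.Is. A short usage sketch (generic code, not part of the library):

```go
package main

import (
	"errors"
	"fmt"
)

// Same pattern as lz4errors: a string-backed error type whose values can be
// declared as constants.
type Error string

func (e Error) Error() string { return string(e) }

const ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"

func main() {
	// The library wraps sentinels with %w, so errors.Is still matches them.
	err := fmt.Errorf("%w: got %x; expected %x", ErrInvalidBlockChecksum, 0xdead, 0xbeef)
	fmt.Println(errors.Is(err, ErrInvalidBlockChecksum)) // true
}
```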
							
								
								
									
331 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go generated vendored Normal file
@@ -0,0 +1,331 @@
package lz4stream

import (
	"encoding/binary"
	"fmt"
	"io"
	"sync"

	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
	"github.com/pierrec/lz4/v4/internal/xxh32"
)

type Blocks struct {
	Block  *FrameDataBlock
	Blocks chan chan *FrameDataBlock
	mu     sync.Mutex
	err    error
}

func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
	if num == 1 {
		b.Blocks = nil
		b.Block = NewFrameDataBlock(f)
		return
	}
	b.Block = nil
	if cap(b.Blocks) != num {
		b.Blocks = make(chan chan *FrameDataBlock, num)
	}
	// goroutine managing concurrent block compression goroutines.
	go func() {
		// Process next block compression item.
		for c := range b.Blocks {
			// Read the next compressed block result.
			// Waiting here ensures that the blocks are output in the order they were sent.
			// The incoming channel is always closed as it indicates to the caller that
			// the block has been processed.
			block := <-c
			if block == nil {
				// Notify the block compression routine that we are done with its result.
				// This is used when a sentinel block is sent to terminate the compression.
				close(c)
				return
			}
			// Do not attempt to write the block upon any previous failure.
			if b.err == nil {
				// Write the block.
				if err := block.Write(f, dst); err != nil {
					// Keep the first error.
					b.err = err
					// All pending compression goroutines need to shut down, so we need to keep going.
				}
			}
			close(c)
		}
	}()
}

func (b *Blocks) close(f *Frame, num int) error {
	if num == 1 {
		if b.Block != nil {
			b.Block.Close(f)
		}
		err := b.err
		b.err = nil
		return err
	}
	if b.Blocks == nil {
		// Not initialized yet.
		return nil
	}
	c := make(chan *FrameDataBlock)
	b.Blocks <- c
	c <- nil
	<-c
	err := b.err
	b.err = nil
	return err
}

// ErrorR returns any error set while uncompressing a stream.
func (b *Blocks) ErrorR() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.err
}

// initR returns a channel that streams the uncompressed blocks if in concurrent
// mode and no error. When the channel is closed, check for any error with b.ErrorR.
//
// If not in concurrent mode, the uncompressed block is b.Block and the returned error
// needs to be checked.
func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
	size := f.Descriptor.Flags.BlockSizeIndex()
	if num == 1 {
		b.Blocks = nil
		b.Block = NewFrameDataBlock(f)
		return nil, nil
	}
	b.Block = nil
	blocks := make(chan chan []byte, num)
	// data receives the uncompressed blocks.
	data := make(chan []byte)
	// Read blocks from the source sequentially
	// and uncompress them concurrently.

	// In legacy mode, accrue the uncompress sizes in cum.
	var cum uint32
	go func() {
		var cumx uint32
		var err error
		for b.ErrorR() == nil {
			block := NewFrameDataBlock(f)
			cumx, err = block.Read(f, src, 0)
			if err != nil {
				break
			}
			// Recheck for an error as reading may be slow and uncompressing is expensive.
			if b.ErrorR() != nil {
				break
			}
			c := make(chan []byte)
			blocks <- c
			go func() {
				data, err := block.Uncompress(f, size.Get(), false)
				if err != nil {
					b.closeR(err)
				} else {
					c <- data
				}
			}()
		}
		// End the collection loop and the data channel.
		c := make(chan []byte)
		blocks <- c
		c <- nil // signal the collection loop that we are done
		<-c      // wait for the collect loop to complete
		if f.isLegacy() && cum == cumx {
			err = io.EOF
		}
		b.closeR(err)
		close(data)
	}()
	// Collect the uncompressed blocks and make them available
	// on the returned channel.
	go func(leg bool) {
		defer close(blocks)
		for c := range blocks {
			buf := <-c
			if buf == nil {
				// Signal to end the loop.
				close(c)
				return
			}
			// Perform checksum now as the blocks are received in order.
			if f.Descriptor.Flags.ContentChecksum() {
				_, _ = f.checksum.Write(buf)
			}
			if leg {
				cum += uint32(len(buf))
			}
			data <- buf
			close(c)
		}
	}(f.isLegacy())
	return data, nil
}

// closeR safely sets the error on b if not already set.
func (b *Blocks) closeR(err error) {
	b.mu.Lock()
	if b.err == nil {
		b.err = err
	}
	b.mu.Unlock()
}

func NewFrameDataBlock(f *Frame) *FrameDataBlock {
	buf := f.Descriptor.Flags.BlockSizeIndex().Get()
	return &FrameDataBlock{Data: buf, data: buf}
}

type FrameDataBlock struct {
	Size     DataBlockSize
	Data     []byte // compressed or uncompressed data (.data or .src)
	Checksum uint32
	data     []byte // buffer for compressed data
	src      []byte // uncompressed data
	err      error  // used in concurrent mode
}

func (b *FrameDataBlock) Close(f *Frame) {
	b.Size = 0
	b.Checksum = 0
	b.err = nil
	if b.data != nil {
		// Block was not already closed.
		lz4block.Put(b.data)
		b.Data = nil
		b.data = nil
		b.src = nil
	}
}

// Block compression errors are ignored since the buffer is sized appropriately.
func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
	data := b.data
	if f.isLegacy() {
		data = data[:cap(data)]
	} else {
		data = data[:len(src)] // trigger the incompressible flag in CompressBlock
	}
	var n int
	switch level {
	case lz4block.Fast:
		n, _ = lz4block.CompressBlock(src, data)
	default:
		n, _ = lz4block.CompressBlockHC(src, data, level)
	}
	if n == 0 {
		b.Size.UncompressedSet(true)
		b.Data = src
	} else {
		b.Size.UncompressedSet(false)
		b.Data = data[:n]
	}
	b.Size.sizeSet(len(b.Data))
	b.src = src // keep track of the source for content checksum

	if f.Descriptor.Flags.BlockChecksum() {
		b.Checksum = xxh32.ChecksumZero(src)
	}
	return b
}

func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
	// Write is called in the same order as blocks are compressed,
	// so content checksum must be done here.
	if f.Descriptor.Flags.ContentChecksum() {
		_, _ = f.checksum.Write(b.src)
	}
	buf := f.buf[:]
	binary.LittleEndian.PutUint32(buf, uint32(b.Size))
	if _, err := dst.Write(buf[:4]); err != nil {
		return err
	}

	if _, err := dst.Write(b.Data); err != nil {
		return err
	}

	if b.Checksum == 0 {
		return nil
	}
	binary.LittleEndian.PutUint32(buf, b.Checksum)
	_, err := dst.Write(buf[:4])
	return err
}

// Read updates b with the next block data, size and checksum if available.
func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
	x, err := f.readUint32(src)
	if err != nil {
		return 0, err
	}
	if f.isLegacy() {
		switch x {
		case frameMagicLegacy:
			// Concatenated legacy frame.
			return b.Read(f, src, cum)
		case cum:
			// Only works in non concurrent mode, for concurrent mode
			// it is handled separately.
			// Linux kernel format appends the total uncompressed size at the end.
			return 0, io.EOF
		}
	} else if x == 0 {
		// Marker for end of stream.
		return 0, io.EOF
	}
	b.Size = DataBlockSize(x)

	size := b.Size.size()
	if size > cap(b.data) {
		return x, lz4errors.ErrOptionInvalidBlockSize
	}
	b.data = b.data[:size]
	if _, err := io.ReadFull(src, b.data); err != nil {
		return x, err
	}
	if f.Descriptor.Flags.BlockChecksum() {
		sum, err := f.readUint32(src)
		if err != nil {
			return 0, err
		}
		b.Checksum = sum
	}
	return x, nil
}

func (b *FrameDataBlock) Uncompress(f *Frame, dst []byte, sum bool) ([]byte, error) {
	if b.Size.Uncompressed() {
		n := copy(dst, b.data)
		dst = dst[:n]
	} else {
		n, err := lz4block.UncompressBlock(b.data, dst)
		if err != nil {
			return nil, err
		}
		dst = dst[:n]
	}
	if f.Descriptor.Flags.BlockChecksum() {
		if c := xxh32.ChecksumZero(dst); c != b.Checksum {
			err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
			return nil, err
		}
	}
	if sum && f.Descriptor.Flags.ContentChecksum() {
		_, _ = f.checksum.Write(dst)
	}
	return dst, nil
}

func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
	if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
		return
	}
	x = binary.LittleEndian.Uint32(f.buf[:4])
	return
}
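Both the concurrent writer (initW) and reader (initR) above rely on a channel of result channels: each job gets its own private channel, those channels are queued in submission order, and a single collector drains them in that same order, so blocks come out ordered no matter which worker finishes first. A minimal generic sketch of that pattern (not the library's types):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	jobs := []string{"block one", "block two", "block three"}
	queue := make(chan chan string, len(jobs))

	for _, j := range jobs {
		c := make(chan string, 1)
		queue <- c // queued in submission order
		go func(j string, c chan string) {
			c <- strings.ToUpper(j) // stand-in for compressing a block
		}(j, c)
	}
	close(queue)

	for c := range queue { // results are collected in submission order
		fmt.Println(<-c)
	}
}
```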
							
								
								
									
200 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go generated vendored Normal file
@@ -0,0 +1,200 @@
// Package lz4stream provides the types that support reading and writing LZ4 data streams.
package lz4stream

import (
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
	"github.com/pierrec/lz4/v4/internal/xxh32"
)

//go:generate go run gen.go

const (
	frameMagic       uint32 = 0x184D2204
	frameSkipMagic   uint32 = 0x184D2A50
	frameMagicLegacy uint32 = 0x184C2102
)

func NewFrame() *Frame {
	return &Frame{}
}

type Frame struct {
	buf        [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes
	Magic      uint32
	Descriptor FrameDescriptor
	Blocks     Blocks
	Checksum   uint32
	checksum   xxh32.XXHZero
}

// Reset allows reusing the Frame.
// The Descriptor configuration is not modified.
func (f *Frame) Reset(num int) {
	f.Magic = 0
	f.Descriptor.Checksum = 0
	f.Descriptor.ContentSize = 0
	_ = f.Blocks.close(f, num)
	f.Checksum = 0
}

func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
	if legacy {
		f.Magic = frameMagicLegacy
		idx := lz4block.Index(lz4block.Block8Mb)
		f.Descriptor.Flags.BlockSizeIndexSet(idx)
	} else {
		f.Magic = frameMagic
		f.Descriptor.initW()
	}
	f.Blocks.initW(f, dst, num)
	f.checksum.Reset()
}

func (f *Frame) CloseW(dst io.Writer, num int) error {
	if err := f.Blocks.close(f, num); err != nil {
		return err
	}
	if f.isLegacy() {
		return nil
	}
	buf := f.buf[:0]
	// End mark (data block size of uint32(0)).
	buf = append(buf, 0, 0, 0, 0)
	if f.Descriptor.Flags.ContentChecksum() {
		buf = f.checksum.Sum(buf)
	}
	_, err := dst.Write(buf)
	return err
}

func (f *Frame) isLegacy() bool {
	return f.Magic == frameMagicLegacy
}

func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
	if f.Magic > 0 {
		// Header already read.
		return nil, nil
	}

newFrame:
	var err error
	if f.Magic, err = f.readUint32(src); err != nil {
		return nil, err
	}
	switch m := f.Magic; {
	case m == frameMagic || m == frameMagicLegacy:
	// All 16 values of frameSkipMagic are valid.
	case m>>8 == frameSkipMagic>>8:
		skip, err := f.readUint32(src)
		if err != nil {
			return nil, err
		}
		if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
			return nil, err
		}
		goto newFrame
	default:
		return nil, lz4errors.ErrInvalidFrame
	}
	if err := f.Descriptor.initR(f, src); err != nil {
		return nil, err
	}
	f.checksum.Reset()
	return f.Blocks.initR(f, num, src)
}

func (f *Frame) CloseR(src io.Reader) (err error) {
	if f.isLegacy() {
		return nil
	}
	if !f.Descriptor.Flags.ContentChecksum() {
		return nil
	}
	if f.Checksum, err = f.readUint32(src); err != nil {
		return err
	}
	if c := f.checksum.Sum32(); c != f.Checksum {
		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
	}
	return nil
}

type FrameDescriptor struct {
	Flags       DescriptorFlags
	ContentSize uint64
	Checksum    uint8
}

func (fd *FrameDescriptor) initW() {
	fd.Flags.VersionSet(1)
	fd.Flags.BlockIndependenceSet(true)
}

func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
	if fd.Checksum > 0 {
		// Header already written.
		return nil
	}

	buf := f.buf[:4]
	// Write the magic number here even though it belongs to the Frame.
	binary.LittleEndian.PutUint32(buf, f.Magic)
	if !f.isLegacy() {
		buf = buf[:4+2]
		binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))

		if fd.Flags.Size() {
			buf = buf[:4+2+8]
			binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
		}
		fd.Checksum = descriptorChecksum(buf[4:])
		buf = append(buf, fd.Checksum)
	}

	_, err := dst.Write(buf)
	return err
}

func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
	if f.isLegacy() {
		idx := lz4block.Index(lz4block.Block8Mb)
		f.Descriptor.Flags.BlockSizeIndexSet(idx)
		return nil
	}
	// Read the flags and the checksum, hoping that there is not content size.
	buf := f.buf[:3]
	if _, err := io.ReadFull(src, buf); err != nil {
		return err
	}
	descr := binary.LittleEndian.Uint16(buf)
	fd.Flags = DescriptorFlags(descr)
	if fd.Flags.Size() {
		// Append the 8 missing bytes.
		buf = buf[:3+8]
		if _, err := io.ReadFull(src, buf[3:]); err != nil {
			return err
		}
		fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
	}
	fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
	buf = buf[:len(buf)-1]        // all descriptor fields except checksum
	if c := descriptorChecksum(buf); fd.Checksum != c {
		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
	}
	// Validate the elements that can be.
	if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
		return lz4errors.ErrOptionInvalidBlockSize
	}
	return nil
}

func descriptorChecksum(buf []byte) byte {
	return byte(xxh32.ChecksumZero(buf) >> 8)
}
							
								
								
									
103  vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go  (generated, vendored, Normal file)
@@ -0,0 +1,103 @@
// Code generated by `gen.exe`. DO NOT EDIT.

package lz4stream

import "github.com/pierrec/lz4/v4/internal/lz4block"

// DescriptorFlags is defined as follow:
//   field              bits
//   -----              ----
//   _                  2
//   ContentChecksum    1
//   Size               1
//   BlockChecksum      1
//   BlockIndependence  1
//   Version            2
//   _                  4
//   BlockSizeIndex     3
//   _                  1
type DescriptorFlags uint16

// Getters.
func (x DescriptorFlags) ContentChecksum() bool   { return x>>2&1 != 0 }
func (x DescriptorFlags) Size() bool              { return x>>3&1 != 0 }
func (x DescriptorFlags) BlockChecksum() bool     { return x>>4&1 != 0 }
func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
func (x DescriptorFlags) Version() uint16         { return uint16(x >> 6 & 0x3) }
func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
	return lz4block.BlockSizeIndex(x >> 12 & 0x7)
}

// Setters.
func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
	const b = 1 << 2
	if v {
		*x = *x&^b | b
	} else {
		*x &^= b
	}
	return x
}
func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
	const b = 1 << 3
	if v {
		*x = *x&^b | b
	} else {
		*x &^= b
	}
	return x
}
func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
	const b = 1 << 4
	if v {
		*x = *x&^b | b
	} else {
		*x &^= b
	}
	return x
}
func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
	const b = 1 << 5
	if v {
		*x = *x&^b | b
	} else {
		*x &^= b
	}
	return x
}
func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
	*x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
	return x
}
func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
	*x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
	return x
}

// Code generated by `gen.exe`. DO NOT EDIT.

// DataBlockSize is defined as follow:
//   field         bits
//   -----         ----
//   size          31
//   Uncompressed  1
type DataBlockSize uint32

// Getters.
func (x DataBlockSize) size() int          { return int(x & 0x7FFFFFFF) }
func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }

// Setters.
func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
	*x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
	return x
}
func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
	const b = 1 << 31
	if v {
		*x = *x&^b | b
	} else {
		*x &^= b
	}
	return x
}
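
The generated DescriptorFlags type above packs the LZ4 frame-descriptor bits into a single uint16. As a rough illustration of that layout, here is a standalone sketch (not the package's own type; bit positions are taken from the getters above, and mapping block-size code 7 to 4 MB blocks is an assumption from the LZ4 frame spec):

package main

import "fmt"

// flags mirrors the generated DescriptorFlags layout (sketch only): bits 6-7
// hold the version, bit 5 block independence, bit 4 block checksum, bit 3
// content size, bit 2 content checksum, bits 12-14 the block size index.
type flags uint16

// set turns a single bit on or off, like the generated *Set(bool) methods.
func (x *flags) set(bit uint, v bool) {
	if v {
		*x |= 1 << bit
	} else {
		*x &^= 1 << bit
	}
}

func main() {
	var f flags
	f = f&^(0x3<<6) | 1<<6   // VersionSet(1)
	f.set(5, true)           // BlockIndependenceSet(true)
	f.set(2, true)           // ContentChecksumSet(true)
	f = f&^(0x7<<12) | 7<<12 // BlockSizeIndexSet(7): 4 MB blocks (assumed per the frame spec)
	fmt.Printf("descriptor flag word: %#04x version=%d\n", uint16(f), f>>6&0x3)
}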
@@ -20,10 +20,7 @@ const (

// XXHZero represents an xxhash32 object with seed 0.
type XXHZero struct {
	v1       uint32
	v2       uint32
	v3       uint32
	v4       uint32
	v        [4]uint32
	totalLen uint64
	buf      [16]byte
	bufused  int
@@ -38,10 +35,10 @@ func (xxh XXHZero) Sum(b []byte) []byte {

// Reset resets the Hash to its initial state.
func (xxh *XXHZero) Reset() {
	xxh.v1 = prime1plus2
	xxh.v2 = prime2
	xxh.v3 = 0
	xxh.v4 = prime1minus
	xxh.v[0] = prime1plus2
	xxh.v[1] = prime2
	xxh.v[2] = 0
	xxh.v[3] = prime1minus
	xxh.totalLen = 0
	xxh.bufused = 0
}
@@ -51,7 +48,7 @@ func (xxh *XXHZero) Size() int {
	return 4
}

// BlockSize gives the minimum number of bytes accepted by Write().
// BlockSizeIndex gives the minimum number of bytes accepted by Write().
func (xxh *XXHZero) BlockSize() int {
	return 1
}
@@ -74,44 +71,48 @@ func (xxh *XXHZero) Write(input []byte) (int, error) {
		return n, nil
	}

	p := 0
	// Causes compiler to work directly from registers instead of stack:
	v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
	if m > 0 {
	var buf *[16]byte
	if m != 0 {
		// some data left from previous update
		copy(xxh.buf[xxh.bufused:], input[:r])
		xxh.bufused += len(input) - r
		buf = &xxh.buf
		c := copy(buf[m:], input)
		n -= c
		input = input[c:]
	}
	update(&xxh.v, buf, input)
	xxh.bufused = copy(xxh.buf[:], input[n-n%16:])

		// fast rotl(13)
		buf := xxh.buf[:16] // BCE hint.
	return n, nil
}

// Portable version of update. This updates v by processing all of buf
// (if not nil) and all full 16-byte blocks of input.
func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
	// Causes compiler to work directly from registers instead of stack:
	v1, v2, v3, v4 := v[0], v[1], v[2], v[3]

	if buf != nil {
		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
		p = r
		xxh.bufused = 0
	}

	for n := n - 16; p <= n; p += 16 {
		sub := input[p:][:16] //BCE hint for compiler
	for ; len(input) >= 16; input = input[16:] {
		sub := input[:16] //BCE hint for compiler
		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
	}
	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4

	copy(xxh.buf[xxh.bufused:], input[p:])
	xxh.bufused += len(input) - p

	return n, nil
	v[0], v[1], v[2], v[3] = v1, v2, v3, v4
}

// Sum32 returns the 32 bits Hash value.
func (xxh *XXHZero) Sum32() uint32 {
	h32 := uint32(xxh.totalLen)
	if h32 >= 16 {
		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
		h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
	} else {
		h32 += prime5
	}
@@ -137,8 +138,8 @@ func (xxh *XXHZero) Sum32() uint32 {
	return h32
}

// ChecksumZero returns the 32bits Hash value.
func ChecksumZero(input []byte) uint32 {
// Portable version of ChecksumZero.
func checksumZeroGo(input []byte) uint32 {
	n := len(input)
	h32 := uint32(n)

@@ -182,18 +183,6 @@ func ChecksumZero(input []byte) uint32 {
	return h32
}

// Uint32Zero hashes x with seed 0.
func Uint32Zero(x uint32) uint32 {
	h := prime5 + 4 + x*prime3
	h = rol17(h) * prime4
	h ^= h >> 15
	h *= prime2
	h ^= h >> 13
	h *= prime3
	h ^= h >> 16
	return h
}

func rol1(u uint32) uint32 {
	return u<<1 | u>>31
}
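
The portable updateGo loop above is the core of xxh32: each of four lanes absorbs 4 little-endian bytes, is rotated left by 13 and scaled by prime1. A self-contained sketch of one such 16-byte round (the prime constants are copied from the assembly below; round16 is an illustrative helper, not part of the package):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// xxh32 primes, as used by the code above and the assembly below.
const (
	prime1 = 2654435761
	prime2 = 2246822519
)

// round16 runs one 16-byte xxh32 accumulator round: every lane absorbs 4
// little-endian bytes, is rotated left by 13 and multiplied by prime1.
func round16(v *[4]uint32, block []byte) {
	for i := range v {
		u := binary.LittleEndian.Uint32(block[4*i:])
		v[i] = bits.RotateLeft32(v[i]+u*prime2, 13) * prime1
	}
}

func main() {
	v := [4]uint32{1, 2, 3, 4} // arbitrary lane state, for illustration
	round16(&v, []byte("0123456789abcdef"))
	fmt.Println(v)
}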
							
								
								
									
11  vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go  (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
// +build !noasm

package xxh32

// ChecksumZero returns the 32-bit hash of input.
//
//go:noescape
func ChecksumZero(input []byte) uint32

//go:noescape
func update(v *[4]uint32, buf *[16]byte, input []byte)
							
								
								
									
259  vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s  (generated, vendored, Normal file)
@@ -0,0 +1,259 @@
// +build !noasm

#include "textflag.h"

#define prime1		$2654435761
#define prime2		$2246822519
#define prime3		$3266489917
#define prime4		$668265263
#define prime5		$374761393

#define prime1plus2	$606290984
#define prime1minus	$1640531535

// Register allocation.
#define p	R0
#define n	R1
#define h	R2
#define v1	R2	// Alias for h.
#define v2	R3
#define v3	R4
#define v4	R5
#define x1	R6
#define x2	R7
#define x3	R8
#define x4	R9

// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
#define prime1r	R11
#define prime2r	R12
#define prime3r	R3	// The rest can alias v{2-4}.
#define prime4r	R4
#define prime5r	R5

// Update round macros. These read from and increment p.

#define round16aligned			\
	MOVM.IA.W (p), [x1, x2, x3, x4]	\
					\
	MULA x1, prime2r, v1, v1	\
	MULA x2, prime2r, v2, v2	\
	MULA x3, prime2r, v3, v3	\
	MULA x4, prime2r, v4, v4	\
					\
	MOVW v1 @> 19, v1		\
	MOVW v2 @> 19, v2		\
	MOVW v3 @> 19, v3		\
	MOVW v4 @> 19, v4		\
					\
	MUL prime1r, v1			\
	MUL prime1r, v2			\
	MUL prime1r, v3			\
	MUL prime1r, v4			\

#define round16unaligned 		\
	MOVBU.P  16(p), x1		\
	MOVBU   -15(p), x2		\
	ORR     x2 <<  8, x1		\
	MOVBU   -14(p), x3		\
	MOVBU   -13(p), x4		\
	ORR     x4 <<  8, x3		\
	ORR     x3 << 16, x1		\
					\
	MULA x1, prime2r, v1, v1	\
	MOVW v1 @> 19, v1		\
	MUL prime1r, v1			\
					\
	MOVBU -12(p), x1		\
	MOVBU -11(p), x2		\
	ORR   x2 <<  8, x1		\
	MOVBU -10(p), x3		\
	MOVBU  -9(p), x4		\
	ORR   x4 <<  8, x3		\
	ORR   x3 << 16, x1		\
					\
	MULA x1, prime2r, v2, v2	\
	MOVW v2 @> 19, v2		\
	MUL prime1r, v2			\
					\
	MOVBU -8(p), x1			\
	MOVBU -7(p), x2			\
	ORR   x2 <<  8, x1		\
	MOVBU -6(p), x3			\
	MOVBU -5(p), x4			\
	ORR   x4 <<  8, x3		\
	ORR   x3 << 16, x1		\
					\
	MULA x1, prime2r, v3, v3	\
	MOVW v3 @> 19, v3		\
	MUL prime1r, v3			\
					\
	MOVBU -4(p), x1			\
	MOVBU -3(p), x2			\
	ORR   x2 <<  8, x1		\
	MOVBU -2(p), x3			\
	MOVBU -1(p), x4			\
	ORR   x4 <<  8, x3		\
	ORR   x3 << 16, x1		\
					\
	MULA x1, prime2r, v4, v4	\
	MOVW v4 @> 19, v4		\
	MUL prime1r, v4			\


// func ChecksumZero([]byte) uint32
TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
	MOVW input_base+0(FP), p
	MOVW input_len+4(FP),  n

	MOVW prime1, prime1r
	MOVW prime2, prime2r

	// Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
	// here, but that's a pseudo-op that generates a load through R11.
	MOVW prime5, prime5r
	ADD  prime5r, n, h
	CMP  $0, n
	BEQ  end

	// We let n go negative so we can do comparisons with SUB.S
	// instead of separate CMP.
	SUB.S $16, n
	BMI   loop16done

	MOVW prime1plus2, v1
	MOVW prime2,      v2
	MOVW $0,          v3
	MOVW prime1minus, v4

	TST $3, p
	BNE loop16unaligned

loop16aligned:
	SUB.S $16, n
	round16aligned
	BPL loop16aligned
	B   loop16finish

loop16unaligned:
	SUB.S $16, n
	round16unaligned
	BPL loop16unaligned

loop16finish:
	MOVW v1 @> 31, h
	ADD  v2 @> 25, h
	ADD  v3 @> 20, h
	ADD  v4 @> 14, h

	// h += len(input) with v2 as temporary.
	MOVW input_len+4(FP), v2
	ADD  v2, h

loop16done:
	ADD $16, n	// Restore number of bytes left.

	SUB.S $4, n
	MOVW  prime3, prime3r
	BMI   loop4done
	MOVW  prime4, prime4r

	TST $3, p
	BNE loop4unaligned

loop4aligned:
	SUB.S $4, n

	MOVW.P 4(p), x1
	MULA   prime3r, x1, h, h
	MOVW   h @> 15, h
	MUL    prime4r, h

	BPL loop4aligned
	B   loop4done

loop4unaligned:
	SUB.S $4, n

	MOVBU.P  4(p), x1
	MOVBU   -3(p), x2
	ORR     x2 <<  8, x1
	MOVBU   -2(p), x3
	ORR     x3 << 16, x1
	MOVBU   -1(p), x4
	ORR     x4 << 24, x1

	MULA prime3r, x1, h, h
	MOVW h @> 15, h
	MUL  prime4r, h

	BPL loop4unaligned

loop4done:
	ADD.S $4, n	// Restore number of bytes left.
	BEQ   end

	MOVW prime5, prime5r

loop1:
	SUB.S $1, n

	MOVBU.P 1(p), x1
	MULA    prime5r, x1, h, h
	MOVW    h @> 21, h
	MUL     prime1r, h

	BNE loop1

end:
	MOVW prime3, prime3r
	EOR  h >> 15, h
	MUL  prime2r, h
	EOR  h >> 13, h
	MUL  prime3r, h
	EOR  h >> 16, h

	MOVW h, ret+12(FP)
	RET


// func update(v *[4]uint64, buf *[16]byte, p []byte)
TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
	MOVW    v+0(FP), p
	MOVM.IA (p), [v1, v2, v3, v4]

	MOVW prime1, prime1r
	MOVW prime2, prime2r

	// Process buf, if not nil.
	MOVW buf+4(FP), p
	CMP  $0, p
	BEQ  noBuffered

	round16aligned

noBuffered:
	MOVW input_base +8(FP), p
	MOVW input_len +12(FP), n

	SUB.S $16, n
	BMI   end

	TST $3, p
	BNE loop16unaligned

loop16aligned:
	SUB.S $16, n
	round16aligned
	BPL loop16aligned
	B   end

loop16unaligned:
	SUB.S $16, n
	round16unaligned
	BPL loop16unaligned

end:
	MOVW    v+0(FP), p
	MOVM.IA [v1, v2, v3, v4], (p)
	RET
							
								
								
									
10  vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go  (generated, vendored, Normal file)
@@ -0,0 +1,10 @@
// +build !arm noasm

package xxh32

// ChecksumZero returns the 32-bit hash of input.
func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }

func update(v *[4]uint32, buf *[16]byte, input []byte) {
	updateGo(v, buf, input)
}
							
								
								
									
147  vendor/github.com/pierrec/lz4/v4/lz4.go  (generated, vendored, Normal file)
@@ -0,0 +1,147 @@
// Package lz4 implements reading and writing lz4 compressed data.
//
// The package supports both the LZ4 stream format,
// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
// and the LZ4 block format, defined at
// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
//
// See https://github.com/lz4/lz4 for the reference C implementation.
package lz4

import (
	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
)

func _() {
	// Safety checks for duplicated elements.
	var x [1]struct{}
	_ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
	_ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
	_ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
	_ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
	_ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
}

// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
func CompressBlockBound(n int) int {
	return lz4block.CompressBlockBound(n)
}

// UncompressBlock uncompresses the source buffer into the destination one,
// and returns the uncompressed size.
//
// The destination buffer must be sized appropriately.
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlock(src, dst []byte) (int, error) {
	return lz4block.UncompressBlock(src, dst)
}

// A Compressor compresses data into the LZ4 block format.
// It uses a fast compression algorithm.
//
// A Compressor is not safe for concurrent use by multiple goroutines.
//
// Use a Writer to compress into the LZ4 stream format.
type Compressor struct{ c lz4block.Compressor }

// CompressBlock compresses the source buffer src into the destination dst.
//
// If compression is successful, the first return value is the size of the
// compressed data, which is always >0.
//
// If dst has length at least CompressBlockBound(len(src)), compression always
// succeeds. Otherwise, the first return value is zero. The error return is
// non-nil if the compressed data does not fit in dst, but it might fit in a
// larger buffer that is still smaller than CompressBlockBound(len(src)). The
// return value (0, nil) means the data is likely incompressible and a buffer
// of length CompressBlockBound(len(src)) should be passed in.
func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
	return c.c.CompressBlock(src, dst)
}

// CompressBlock compresses the source buffer into the destination one.
// This is the fast version of LZ4 compression and also the default one.
//
// The argument hashTable is scratch space for a hash table used by the
// compressor. If provided, it should have length at least 1<<16. If it is
// shorter (or nil), CompressBlock allocates its own hash table.
//
// The size of the compressed data is returned.
//
// If the destination buffer size is lower than CompressBlockBound and
// the compressed size is 0 and no error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.

// CompressBlock is equivalent to Compressor.CompressBlock.
// The final argument is ignored and should be set to nil.
//
// This function is deprecated. Use a Compressor instead.
func CompressBlock(src, dst []byte, _ []int) (int, error) {
	return lz4block.CompressBlock(src, dst)
}

// A CompressorHC compresses data into the LZ4 block format.
// Its compression ratio is potentially better than that of a Compressor,
// but it is also slower and requires more memory.
//
// A Compressor is not safe for concurrent use by multiple goroutines.
//
// Use a Writer to compress into the LZ4 stream format.
type CompressorHC struct {
	// Level is the maximum search depth for compression.
	// Values <= 0 mean no maximum.
	Level CompressionLevel
	c     lz4block.CompressorHC
}

// CompressBlock compresses the source buffer src into the destination dst.
//
// If compression is successful, the first return value is the size of the
// compressed data, which is always >0.
//
// If dst has length at least CompressBlockBound(len(src)), compression always
// succeeds. Otherwise, the first return value is zero. The error return is
// non-nil if the compressed data does not fit in dst, but it might fit in a
// larger buffer that is still smaller than CompressBlockBound(len(src)). The
// return value (0, nil) means the data is likely incompressible and a buffer
// of length CompressBlockBound(len(src)) should be passed in.
func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
	return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
}

// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
// The final two arguments are ignored and should be set to nil.
//
// This function is deprecated. Use a CompressorHC instead.
func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
}

const (
	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
	ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
	// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
	ErrInvalidFrame = lz4errors.ErrInvalidFrame
	// ErrInternalUnhandledState is an internal error.
	ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
	// ErrInvalidHeaderChecksum is returned when reading a frame.
	ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
	// ErrInvalidBlockChecksum is returned when reading a frame.
	ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
	// ErrInvalidFrameChecksum is returned when reading a frame.
	ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
	// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
	ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
	// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
	ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
	// ErrOptionInvalidBlockSize is returned when
	ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
	// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
	ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
	// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
	ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
)
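
A minimal sketch of how the block-level API declared above might be used for a compress/decompress round trip (the import path matches the vendored module path; error handling kept short for brevity):

package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("lz4 block api example "), 100)

	// Compress into a worst-case sized buffer; n == 0 would mean incompressible.
	var c lz4.Compressor
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, dst)
	if err != nil {
		panic(err)
	}

	// Uncompress into a buffer large enough for the original data.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Println(n, m, bytes.Equal(out[:m], src))
}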
							
								
								
									
213  vendor/github.com/pierrec/lz4/v4/options.go  (generated, vendored, Normal file)
@@ -0,0 +1,213 @@
package lz4

import (
	"fmt"
	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
	"reflect"
	"runtime"
)

//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go

type (
	applier interface {
		Apply(...Option) error
		private()
	}
	// Option defines the parameters to setup an LZ4 Writer or Reader.
	Option func(applier) error
)

// String returns a string representation of the option with its parameter(s).
func (o Option) String() string {
	return o(nil).Error()
}

// Default options.
var (
	DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
	DefaultChecksumOption  = ChecksumOption(true)
	DefaultConcurrency     = ConcurrencyOption(1)
	defaultOnBlockDone     = OnBlockDoneOption(nil)
)

const (
	Block64Kb BlockSize = 1 << (16 + iota*2)
	Block256Kb
	Block1Mb
	Block4Mb
)

// BlockSizeIndex defines the size of the blocks to be compressed.
type BlockSize uint32

// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
func BlockSizeOption(size BlockSize) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("BlockSizeOption(%s)", size)
			return lz4errors.Error(s)
		case *Writer:
			size := uint32(size)
			if !lz4block.IsValid(size) {
				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
			}
			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// BlockChecksumOption enables or disables block checksum (default=false).
func BlockChecksumOption(flag bool) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
			return lz4errors.Error(s)
		case *Writer:
			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// ChecksumOption enables/disables all blocks or content checksum (default=true).
func ChecksumOption(flag bool) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("ChecksumOption(%v)", flag)
			return lz4errors.Error(s)
		case *Writer:
			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the
// whole uncompressed data stream.
func SizeOption(size uint64) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("SizeOption(%d)", size)
			return lz4errors.Error(s)
		case *Writer:
			w.frame.Descriptor.Flags.SizeSet(size > 0)
			w.frame.Descriptor.ContentSize = size
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// ConcurrencyOption sets the number of go routines used for compression.
// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
func ConcurrencyOption(n int) Option {
	if n <= 0 {
		n = runtime.GOMAXPROCS(0)
	}
	return func(a applier) error {
		switch rw := a.(type) {
		case nil:
			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
			return lz4errors.Error(s)
		case *Writer:
			rw.num = n
			return nil
		case *Reader:
			rw.num = n
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// CompressionLevel defines the level of compression to use. The higher the better, but slower, compression.
type CompressionLevel uint32

const (
	Fast   CompressionLevel = 0
	Level1 CompressionLevel = 1 << (8 + iota)
	Level2
	Level3
	Level4
	Level5
	Level6
	Level7
	Level8
	Level9
)

// CompressionLevelOption defines the compression level (default=Fast).
func CompressionLevelOption(level CompressionLevel) Option {
	return func(a applier) error {
		switch w := a.(type) {
		case nil:
			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
			return lz4errors.Error(s)
		case *Writer:
			switch level {
			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
			default:
				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
			}
			w.level = lz4block.CompressionLevel(level)
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

func onBlockDone(int) {}

// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when is has been compressed,
// for a Reader, it is when it has been uncompressed.
func OnBlockDoneOption(handler func(size int)) Option {
	if handler == nil {
		handler = onBlockDone
	}
	return func(a applier) error {
		switch rw := a.(type) {
		case nil:
			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
			return lz4errors.Error(s)
		case *Writer:
			rw.handler = handler
			return nil
		case *Reader:
			rw.handler = handler
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}

// LegacyOption provides support for writing LZ4 frames in the legacy format.
//
// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
//
// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
// the compressed stream is followed by the original (uncompressed) size of
// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
// This is also supported as a special case.
func LegacyOption(legacy bool) Option {
	return func(a applier) error {
		switch rw := a.(type) {
		case nil:
			s := fmt.Sprintf("LegacyOption(%v)", legacy)
			return lz4errors.Error(s)
		case *Writer:
			rw.legacy = legacy
			return nil
		}
		return lz4errors.ErrOptionNotApplicable
	}
}
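
A short sketch of applying these options to a stream writer. It assumes the package's NewWriter constructor and Writer.Close from writer.go, which is only partially shown at the end of this diff; the option constructors themselves are the ones defined above:

package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	zw := lz4.NewWriter(os.Stdout) // NewWriter/Close assumed from writer.go
	// Options are applied before any data is written; failures surface as the
	// lz4errors values re-exported by this package.
	if err := zw.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),
		lz4.ChecksumOption(true),
		lz4.ConcurrencyOption(-1), // <= 0 selects runtime.GOMAXPROCS(0)
		lz4.CompressionLevelOption(lz4.Level5),
	); err != nil {
		panic(err)
	}
	if _, err := io.Copy(zw, os.Stdin); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
}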
							
								
								
									
92  vendor/github.com/pierrec/lz4/v4/options_gen.go  (generated, vendored, Normal file)
@@ -0,0 +1,92 @@
// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.

package lz4

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[Block64Kb-65536]
	_ = x[Block256Kb-262144]
	_ = x[Block1Mb-1048576]
	_ = x[Block4Mb-4194304]
}

const (
	_BlockSize_name_0 = "Block64Kb"
	_BlockSize_name_1 = "Block256Kb"
	_BlockSize_name_2 = "Block1Mb"
	_BlockSize_name_3 = "Block4Mb"
)

func (i BlockSize) String() string {
	switch {
	case i == 65536:
		return _BlockSize_name_0
	case i == 262144:
		return _BlockSize_name_1
	case i == 1048576:
		return _BlockSize_name_2
	case i == 4194304:
		return _BlockSize_name_3
	default:
		return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[Fast-0]
	_ = x[Level1-512]
	_ = x[Level2-1024]
	_ = x[Level3-2048]
	_ = x[Level4-4096]
	_ = x[Level5-8192]
	_ = x[Level6-16384]
	_ = x[Level7-32768]
	_ = x[Level8-65536]
	_ = x[Level9-131072]
}

const (
	_CompressionLevel_name_0 = "Fast"
	_CompressionLevel_name_1 = "Level1"
	_CompressionLevel_name_2 = "Level2"
	_CompressionLevel_name_3 = "Level3"
	_CompressionLevel_name_4 = "Level4"
	_CompressionLevel_name_5 = "Level5"
	_CompressionLevel_name_6 = "Level6"
	_CompressionLevel_name_7 = "Level7"
	_CompressionLevel_name_8 = "Level8"
	_CompressionLevel_name_9 = "Level9"
)

func (i CompressionLevel) String() string {
	switch {
	case i == 0:
		return _CompressionLevel_name_0
	case i == 512:
		return _CompressionLevel_name_1
	case i == 1024:
		return _CompressionLevel_name_2
	case i == 2048:
		return _CompressionLevel_name_3
	case i == 4096:
		return _CompressionLevel_name_4
	case i == 8192:
		return _CompressionLevel_name_5
	case i == 16384:
		return _CompressionLevel_name_6
	case i == 32768:
		return _CompressionLevel_name_7
	case i == 65536:
		return _CompressionLevel_name_8
	case i == 131072:
		return _CompressionLevel_name_9
	default:
		return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
							
								
								
									
243  vendor/github.com/pierrec/lz4/v4/reader.go  (generated, vendored, Normal file)
@@ -0,0 +1,243 @@
package lz4

import (
	"io"

	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
	"github.com/pierrec/lz4/v4/internal/lz4stream"
)

var readerStates = []aState{
	noState:     newState,
	errorState:  newState,
	newState:    readState,
	readState:   closedState,
	closedState: newState,
}

// NewReader returns a new LZ4 frame decoder.
func NewReader(r io.Reader) *Reader {
	return newReader(r, false)
}

func newReader(r io.Reader, legacy bool) *Reader {
	zr := &Reader{frame: lz4stream.NewFrame()}
	zr.state.init(readerStates)
	_ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
	zr.Reset(r)
	return zr
}

// Reader allows reading an LZ4 stream.
type Reader struct {
	state   _State
	src     io.Reader        // source reader
	num     int              // concurrency level
	frame   *lz4stream.Frame // frame being read
	data    []byte           // block buffer allocated in non concurrent mode
	reads   chan []byte      // pending data
	idx     int              // size of pending data
	handler func(int)
	cum     uint32
}

func (*Reader) private() {}

func (r *Reader) Apply(options ...Option) (err error) {
	defer r.state.check(&err)
	switch r.state.state {
	case newState:
	case errorState:
		return r.state.err
	default:
		return lz4errors.ErrOptionClosedOrError
	}
	for _, o := range options {
		if err = o(r); err != nil {
			return
		}
	}
	return
}

// Size returns the size of the underlying uncompressed data, if set in the stream.
func (r *Reader) Size() int {
	switch r.state.state {
	case readState, closedState:
		if r.frame.Descriptor.Flags.Size() {
			return int(r.frame.Descriptor.ContentSize)
		}
	}
	return 0
}

func (r *Reader) isNotConcurrent() bool {
	return r.num == 1
}

func (r *Reader) init() error {
	data, err := r.frame.InitR(r.src, r.num)
	if err != nil {
		return err
	}
	r.reads = data
	r.idx = 0
	size := r.frame.Descriptor.Flags.BlockSizeIndex()
	r.data = size.Get()
	r.cum = 0
	return nil
}

func (r *Reader) Read(buf []byte) (n int, err error) {
	defer r.state.check(&err)
	switch r.state.state {
	case readState:
	case closedState, errorState:
		return 0, r.state.err
	case newState:
		// First initialization.
		if err = r.init(); r.state.next(err) {
			return
		}
	default:
		return 0, r.state.fail()
	}
	for len(buf) > 0 {
		var bn int
		if r.idx == 0 {
			if r.isNotConcurrent() {
				bn, err = r.read(buf)
			} else {
				lz4block.Put(r.data)
				r.data = <-r.reads
				if len(r.data) == 0 {
					// No uncompressed data: something went wrong or we are done.
					err = r.frame.Blocks.ErrorR()
				}
			}
			switch err {
			case nil:
			case io.EOF:
				if er := r.frame.CloseR(r.src); er != nil {
					err = er
				}
				lz4block.Put(r.data)
				r.data = nil
				return
			default:
				return
			}
		}
		if bn == 0 {
			// Fill buf with buffered data.
			bn = copy(buf, r.data[r.idx:])
			r.idx += bn
			if r.idx == len(r.data) {
				// All data read, get ready for the next Read.
				r.idx = 0
			}
		}
		buf = buf[bn:]
		n += bn
		r.handler(bn)
	}
	return
}

// read uncompresses the next block as follow:
// - if buf has enough room, the block is uncompressed into it directly
//   and the lenght of used space is returned
// - else, the uncompress data is stored in r.data and 0 is returned
func (r *Reader) read(buf []byte) (int, error) {
	block := r.frame.Blocks.Block
	_, err := block.Read(r.frame, r.src, r.cum)
	if err != nil {
		return 0, err
	}
	var direct bool
	dst := r.data[:cap(r.data)]
	if len(buf) >= len(dst) {
		// Uncompress directly into buf.
		direct = true
		dst = buf
	}
	dst, err = block.Uncompress(r.frame, dst, true)
	if err != nil {
		return 0, err
	}
	r.cum += uint32(len(dst))
	if direct {
		return len(dst), nil
	}
	r.data = dst
	return 0, nil
}

// Reset clears the state of the Reader r such that it is equivalent to its
// initial state from NewReader, but instead writing to writer.
// No access to reader is performed.
//
// w.Close must be called before Reset.
func (r *Reader) Reset(reader io.Reader) {
	if r.data != nil {
		lz4block.Put(r.data)
		r.data = nil
	}
	r.frame.Reset(r.num)
	r.state.reset()
	r.src = reader
	r.reads = nil
}

// WriteTo efficiently uncompresses the data from the Reader underlying source to w.
func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
	switch r.state.state {
	case closedState, errorState:
		return 0, r.state.err
	case newState:
		if err = r.init(); r.state.next(err) {
			return
		}
	default:
		return 0, r.state.fail()
	}
	defer r.state.nextd(&err)

	var data []byte
	if r.isNotConcurrent() {
		size := r.frame.Descriptor.Flags.BlockSizeIndex()
		data = size.Get()
		defer lz4block.Put(data)
	}
	for {
		var bn int
		var dst []byte
		if r.isNotConcurrent() {
			bn, err = r.read(data)
			dst = data[:bn]
		} else {
			lz4block.Put(dst)
			dst = <-r.reads
			bn = len(dst)
			if bn == 0 {
				// No uncompressed data: something went wrong or we are done.
				err = r.frame.Blocks.ErrorR()
			}
		}
		switch err {
		case nil:
		case io.EOF:
			err = r.frame.CloseR(r.src)
			return
		default:
			return
		}
		r.handler(bn)
		bn, err = w.Write(dst)
		n += int64(bn)
		if err != nil {
			return
		}
	}
}
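
A minimal usage sketch for the Reader above, streaming a frame into an io.Writer via WriteTo (the input file name is hypothetical; NewReader and WriteTo are the functions defined in this file):

package main

import (
	"fmt"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	f, err := os.Open("data.lz4") // hypothetical compressed input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	// WriteTo decodes block by block straight into the destination writer,
	// avoiding the extra copies a manual Read loop would make.
	n, err := zr.WriteTo(os.Stdout)
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(os.Stderr, "uncompressed bytes:", n)
}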
							
								
								
									
										75
									
								
								vendor/github.com/pierrec/lz4/v4/state.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										75
									
								
								vendor/github.com/pierrec/lz4/v4/state.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,75 @@
package lz4

import (
	"errors"
	"fmt"
	"io"

	"github.com/pierrec/lz4/v4/internal/lz4errors"
)

//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go

const (
	noState     aState = iota // uninitialized reader
	errorState                // unrecoverable error encountered
	newState                  // instantiated object
	readState                 // reading data
	writeState                // writing data
	closedState               // all done
)

type (
	aState uint8
	_State struct {
		states []aState
		state  aState
		err    error
	}
)

func (s *_State) init(states []aState) {
	s.states = states
	s.state = states[0]
}

func (s *_State) reset() {
	s.state = s.states[0]
	s.err = nil
}

// next sets the state to the next one unless it is passed a non-nil error.
// It returns whether or not it is in error.
func (s *_State) next(err error) bool {
	if err != nil {
		s.err = fmt.Errorf("%s: %w", s.state, err)
		s.state = errorState
		return true
	}
	s.state = s.states[s.state]
	return false
}

// nextd is like next but for defers.
func (s *_State) nextd(errp *error) bool {
	return errp != nil && s.next(*errp)
}

// check records a non-nil error and, unless that error is io.EOF, switches s to errorState.
// It does nothing if s is already in error.
func (s *_State) check(errp *error) {
	if s.state == errorState || errp == nil {
		return
	}
	if err := *errp; err != nil {
		s.err = fmt.Errorf("%w[%s]", err, s.state)
		if !errors.Is(err, io.EOF) {
			s.state = errorState
		}
	}
}

func (s *_State) fail() error {
	s.state = errorState
	s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
	return s.err
}
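The Reader and Writer share this small table-driven state machine: on success, next simply replaces the current state with s.states[s.state], so a transition table is just a slice indexed by the current state. A standalone illustration of that lookup, using a hypothetical demoStates table laid out the same way as the writerStates table later in this diff:

// Illustration only: the table lookup performed by _State.next on success.
package main

import "fmt"

type aState uint8

const (
	noState aState = iota
	errorState
	newState
	readState
	writeState
	closedState
)

// demoStates is hypothetical; it mirrors the shape of writerStates below:
// the index is the current state, the value is its successor.
var demoStates = []aState{
	noState:     newState,
	newState:    writeState,
	writeState:  closedState,
	closedState: newState,
	errorState:  newState,
}

func main() {
	s := noState
	for i := 0; i < 4; i++ {
		s = demoStates[s] // same idea as s.state = s.states[s.state]
		fmt.Println(s)    // prints 2, 4, 5, 2 (newState, writeState, closedState, newState)
	}
}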

28 vendor/github.com/pierrec/lz4/v4/state_gen.go generated vendored Normal file
@@ -0,0 +1,28 @@
// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.

package lz4

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[noState-0]
	_ = x[errorState-1]
	_ = x[newState-2]
	_ = x[readState-3]
	_ = x[writeState-4]
	_ = x[closedState-5]
}

const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"

var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}

func (i aState) String() string {
	if i >= aState(len(_aState_index)-1) {
		return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _aState_name[_aState_index[i]:_aState_index[i+1]]
}
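The generated String method packs all state names into one string and slices it by the offsets in _aState_index, so the lookup allocates nothing. A quick check of that arithmetic, using the exact data from the file above:

// Verifies the slicing performed by aState.String for readState (value 3).
package main

import "fmt"

const name = "noStateerrorStatenewStatereadStatewriteStateclosedState"

var index = [...]uint8{0, 7, 17, 25, 34, 44, 55}

func main() {
	fmt.Println(name[index[3]:index[4]]) // prints "readState"
}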

233 vendor/github.com/pierrec/lz4/v4/writer.go generated vendored Normal file
@@ -0,0 +1,233 @@
package lz4

import (
	"io"

	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
	"github.com/pierrec/lz4/v4/internal/lz4stream"
)

var writerStates = []aState{
	noState:     newState,
	newState:    writeState,
	writeState:  closedState,
	closedState: newState,
	errorState:  newState,
}

// NewWriter returns a new LZ4 frame encoder.
func NewWriter(w io.Writer) *Writer {
	zw := &Writer{frame: lz4stream.NewFrame()}
	zw.state.init(writerStates)
	_ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
	zw.Reset(w)
	return zw
}

// Writer allows writing an LZ4 stream.
type Writer struct {
	state   _State
	src     io.Writer                 // destination writer
	level   lz4block.CompressionLevel // how hard to try
	num     int                       // concurrency level
	frame   *lz4stream.Frame          // frame being built
	data    []byte                    // pending data
	idx     int                       // size of pending data
	handler func(int)
	legacy  bool
}

func (*Writer) private() {}

func (w *Writer) Apply(options ...Option) (err error) {
	defer w.state.check(&err)
	switch w.state.state {
	case newState:
	case errorState:
		return w.state.err
	default:
		return lz4errors.ErrOptionClosedOrError
	}
	for _, o := range options {
		if err = o(w); err != nil {
			return
		}
	}
	w.Reset(w.src)
	return
}

func (w *Writer) isNotConcurrent() bool {
	return w.num == 1
}

// init sets up the Writer when in newState. It does not change the Writer state.
func (w *Writer) init() error {
	w.frame.InitW(w.src, w.num, w.legacy)
	if true || !w.isNotConcurrent() {
		size := w.frame.Descriptor.Flags.BlockSizeIndex()
		w.data = size.Get()
	}
	w.idx = 0
	return w.frame.Descriptor.Write(w.frame, w.src)
}

func (w *Writer) Write(buf []byte) (n int, err error) {
	defer w.state.check(&err)
	switch w.state.state {
	case writeState:
	case closedState, errorState:
		return 0, w.state.err
	case newState:
		if err = w.init(); w.state.next(err) {
			return
		}
	default:
		return 0, w.state.fail()
	}

	zn := len(w.data)
	for len(buf) > 0 {
		if w.idx == 0 && len(buf) >= zn {
			// Avoid a copy as there is enough data for a block.
			if err = w.write(buf[:zn], false); err != nil {
				return
			}
			n += zn
			buf = buf[zn:]
			continue
		}
		// Accumulate the data to be compressed.
		m := copy(w.data[w.idx:], buf)
		n += m
		w.idx += m
		buf = buf[m:]

		if w.idx < len(w.data) {
			// Buffer not filled.
			return
		}

		// Buffer full.
		if err = w.write(w.data, true); err != nil {
			return
		}
		if !w.isNotConcurrent() {
			size := w.frame.Descriptor.Flags.BlockSizeIndex()
			w.data = size.Get()
		}
		w.idx = 0
	}
	return
}
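Write above compresses directly out of the caller's buffer whenever a full block is available and otherwise buffers the tail in w.data; nothing reaches the destination until a block fills or Close flushes the remainder. A minimal usage sketch using only the API shown in this file (NewWriter, Write, Close):

// Compress a few small writes into one LZ4 frame held in memory.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var buf bytes.Buffer
	zw := lz4.NewWriter(&buf)

	// Each chunk is far smaller than a block, so these calls only accumulate
	// data in the Writer's pending buffer.
	for i := 0; i < 3; i++ {
		if _, err := zw.Write([]byte("hello, lz4! ")); err != nil {
			log.Fatal(err)
		}
	}

	// Close compresses the pending data and writes the frame footer.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("compressed frame is %d bytes\n", buf.Len())
}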

func (w *Writer) write(data []byte, safe bool) error {
	if w.isNotConcurrent() {
		block := w.frame.Blocks.Block
		err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
		w.handler(len(block.Data))
		return err
	}
	c := make(chan *lz4stream.FrameDataBlock)
	w.frame.Blocks.Blocks <- c
	go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
		b := lz4stream.NewFrameDataBlock(w.frame)
		c <- b.Compress(w.frame, data, w.level)
		<-c
		w.handler(len(b.Data))
		b.Close(w.frame)
		if safe {
			// safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed
			lz4block.Put(data)
		}
	}(c, data, safe)

	return nil
}

// Close closes the Writer, flushing any unwritten data to the underlying io.Writer,
// but does not close the underlying io.Writer.
func (w *Writer) Close() (err error) {
	switch w.state.state {
	case writeState:
	case errorState:
		return w.state.err
	default:
		return nil
	}
	defer w.state.nextd(&err)
	if w.idx > 0 {
		// Flush pending data, disable w.data freeing as it is done later on.
		if err = w.write(w.data[:w.idx], false); err != nil {
			return err
		}
		w.idx = 0
	}
	err = w.frame.CloseW(w.src, w.num)
	// It is now safe to free the buffer.
	if w.data != nil {
		lz4block.Put(w.data)
		w.data = nil
	}
	return
}

// Reset clears the state of the Writer w such that it is equivalent to its
// initial state from NewWriter, but instead writing to writer.
// Reset keeps the previous options.
// No access to writer is performed.
//
// w.Close must be called before Reset or pending data may be dropped.
func (w *Writer) Reset(writer io.Writer) {
	w.frame.Reset(w.num)
	w.state.reset()
	w.src = writer
}

// ReadFrom efficiently reads from r and compresses into the Writer destination.
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	switch w.state.state {
	case closedState, errorState:
		return 0, w.state.err
	case newState:
		if err = w.init(); w.state.next(err) {
			return
		}
	default:
		return 0, w.state.fail()
	}
	defer w.state.check(&err)

	size := w.frame.Descriptor.Flags.BlockSizeIndex()
	var done bool
	var rn int
	data := size.Get()
	if w.isNotConcurrent() {
		// Keep the same buffer for the whole process.
		defer lz4block.Put(data)
	}
	for !done {
		rn, err = io.ReadFull(r, data)
		switch err {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
			done = true
		default:
			return
		}
		n += int64(rn)
		err = w.write(data[:rn], true)
		if err != nil {
			return
		}
		w.handler(rn)
		if !done && !w.isNotConcurrent() {
			// The buffer will be returned automatically by goroutines (safe=true)
			// so get a new one for the next round.
			data = size.Get()
		}
	}
	err = w.Close()
	return
}
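ReadFrom pulls block-sized chunks from the source, hands each one to write (the concurrent path returns pooled buffers via safe=true), and closes the frame itself once the source is drained. A streaming sketch with hypothetical file names:

// Stream-compress a file; io.Copy picks up the Writer's ReadFrom shown above.
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	in, err := os.Open("data.txt") // hypothetical input path
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("data.txt.lz4")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := lz4.NewWriter(out)
	// Equivalent to zw.ReadFrom(in); ReadFrom calls zw.Close when done,
	// so the frame is already complete here.
	if _, err := io.Copy(zw, in); err != nil {
		log.Fatal(err)
	}
}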