author | 6543 <6543@obermui.de> | 2020-02-28 10:51:18 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2020-02-28 10:51:18 +0100 |
commit | 8d2059a20184842d9a7a573d285956dd998d42c4 (patch) | |
tree | ea957536a3d71dc9a98b793fe9b0e8c89252ea12 | |
parent | 694f44660f51aa2e8be920c49461380b7db64755 (diff) | |
download | gitea-8d2059a20184842d9a7a573d285956dd998d42c4.tar.gz, gitea-8d2059a20184842d9a7a573d285956dd998d42c4.zip | |
update: macaron cors, gzip, session (#10522)
Co-authored-by: zeripath <art27@cantab.net>
47 files changed, 2154 insertions, 349 deletions
@@ -8,13 +8,13 @@ require (
 	gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b
 	gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76
 	gitea.com/macaron/captcha v0.0.0-20190822015246-daa973478bae
-	gitea.com/macaron/cors v0.0.0-20190821152825-7dcef4a17175
+	gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4
 	gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439
-	gitea.com/macaron/gzip v0.0.0-20191118033930-0c4c5566a0e5
+	gitea.com/macaron/gzip v0.0.0-20191118041502-506895b47aae
 	gitea.com/macaron/i18n v0.0.0-20190822004228-474e714e2223
 	gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a
 	gitea.com/macaron/macaron v1.4.0
-	gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705
+	gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d
 	gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7
 	github.com/PuerkitoBio/goquery v1.5.0
 	github.com/RoaringBitmap/roaring v0.4.21 // indirect
@@ -25,7 +25,6 @@ require (
 	github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f // indirect
 	github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 // indirect
 	github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 // indirect
-	github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 // indirect
 	github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd // indirect
 	github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect
 	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
@@ -59,7 +58,7 @@ require (
 	github.com/joho/godotenv v1.3.0 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20170619183022-cd60e84ee657
 	github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4
-	github.com/klauspost/compress v1.9.2
+	github.com/klauspost/compress v1.10.2
 	github.com/lafriks/xormstore v1.3.2
 	github.com/lib/pq v1.2.0
 	github.com/lunny/dingtalk_webhook v0.0.0-20171025031554-e3534c89ef96
@@ -99,7 +98,7 @@ require (
 	github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
 	github.com/yuin/goldmark v1.1.19
 	go.etcd.io/bbolt v1.3.3 // indirect
-	golang.org/x/crypto v0.0.0-20200219234226-1ad67e1f0ef4
+	golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
 	golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 	golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c
@@ -108,7 +107,7 @@ require (
 	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 	gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect
 	gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
-	gopkg.in/ini.v1 v1.51.1
+	gopkg.in/ini.v1 v1.52.0
 	gopkg.in/ldap.v3 v3.0.2
 	gopkg.in/src-d/go-billy.v4 v4.3.2
 	gopkg.in/src-d/go-git.v4 v4.13.1
@@ -17,12 +17,12 @@ gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76 h1:mMsMEg90c5KXQgRWsH
 gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76/go.mod h1:NFHb9Of+LUnU86bU20CiXXg6ZlgCJ4XytP14UsHOXFs=
 gitea.com/macaron/captcha v0.0.0-20190822015246-daa973478bae h1:9C31eOCpMPbW9rDVq8M1UJ+5HZVYA38HHaKCVcRYDpI=
 gitea.com/macaron/captcha v0.0.0-20190822015246-daa973478bae/go.mod h1:J5h3N+1nKTXtU1x4GxexaQKgAz8UiWecNwi/CfX7CtQ=
-gitea.com/macaron/cors v0.0.0-20190821152825-7dcef4a17175 h1:ikzdAGB6SsUGByW5wKlK+JwzfgQHX+GJnBwEfsaCTNY=
-gitea.com/macaron/cors v0.0.0-20190821152825-7dcef4a17175/go.mod h1:rtOK4J20kpMD9XcNsnO5YA843YSTe/MUMbDj/TJ/Q7A=
+gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4 h1:e2rAFDejB0qN8OrY4xP4XSu8/yT6QmWxDZpB3J7r2GU=
+gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4/go.mod h1:rtOK4J20kpMD9XcNsnO5YA843YSTe/MUMbDj/TJ/Q7A=
 gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439 h1:88c34YM29a1GlWLrLBaG/GTT2htDdJz1u3n9+lmPolg=
 gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439/go.mod h1:IsQPHx73HnnqFBYiVHjg87q4XBZyGXXu77xANukvZuk=
-gitea.com/macaron/gzip v0.0.0-20191118033930-0c4c5566a0e5 h1:G/a7r0r2jEelSynBlv1+PAEZQKfsdRHQUMb1PlNvemM=
-gitea.com/macaron/gzip v0.0.0-20191118033930-0c4c5566a0e5/go.mod h1:jGHtoovArcQj+sw7NJxyPgjuRxOSG9a/oFu3VkLRTKQ=
+gitea.com/macaron/gzip v0.0.0-20191118041502-506895b47aae h1:OXxYwGmGNfYrC0/sUUL9KSvr2Sfvmzwgd2YD65vIjGE=
+gitea.com/macaron/gzip v0.0.0-20191118041502-506895b47aae/go.mod h1:jGHtoovArcQj+sw7NJxyPgjuRxOSG9a/oFu3VkLRTKQ=
 gitea.com/macaron/i18n v0.0.0-20190822004228-474e714e2223 h1:iZWwQif/LHMjBgfY/ua8CFVa4XMDfbbs7EZ0Q1dYguU=
 gitea.com/macaron/i18n v0.0.0-20190822004228-474e714e2223/go.mod h1:+qsc10s4hBsHKU/9luGGumFh4m5FFVc7uih+8/mM1NY=
 gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM=
@@ -35,6 +35,8 @@ gitea.com/macaron/macaron v1.4.0 h1:FY1QDGqyuUzs21K6ChkbYbRUfwL7v2aUrhNEJ0IgsAw=
 gitea.com/macaron/macaron v1.4.0/go.mod h1:P7hfDbQjcW22lkYkXlxdRIfWOXxH2+K4EogN4Q0UlLY=
 gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705 h1:mvkQGAlON1Z6Y8pqa/+FpYIskk54mazuECUfZK5oTg0=
 gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705/go.mod h1:1ujH0jD6Ca4iK9NL0Q2a7fG2chvXx5hVa7hBfABwpkA=
+gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d h1:XLww3CvnFZkXVwauN67fniDaIpIqsE+9KVcxlZKlvLU=
+gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d/go.mod h1:FanKy3WjWb5iw/iZBPk4ggoQT9FcM6bkBPvmDmsH6tY=
 gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7 h1:N9QFoeNsUXLhl14mefLzGluqV7w2mGU3u+iZU+jCeWk=
 gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7/go.mod h1:kgsbFPPS4P+acDYDOPDa3N4IWWOuDJt5/INKRUz7aks=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
@@ -349,6 +351,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
 github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
+github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -602,8 +606,8 @@ golang.org/x/crypto v0.0.0-20190907121410-71b5226ff739/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ=
 golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200219234226-1ad67e1f0ef4 h1:4icQlpeqbz3WxfgP6Eq3szTj95KTrlH/CwzBzoxuFd0=
-golang.org/x/crypto v0.0.0-20200219234226-1ad67e1f0ef4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=
+golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -746,8 +750,8 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.44.2/go.mod h1:M3Cogqpuv0QCi3ExAY5V4uOt4qb/R3xZubo9m8lK5wg=
 gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho=
-gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.52.0 h1:j+Lt/M1oPPejkniCg1TkWE2J3Eh1oZTsHSXzMTzUXn4=
+gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ldap.v3 v3.0.2 h1:R6RBtabK6e1GO0eQKtkyOFbAHO73QesLzI2w2DZ6b9w=
 gopkg.in/ldap.v3 v3.0.2/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
diff --git a/vendor/gitea.com/macaron/cors/cors.go b/vendor/gitea.com/macaron/cors/cors.go
index ed00df1e7b..2d0613f2bc 100644
--- a/vendor/gitea.com/macaron/cors/cors.go
+++ b/vendor/gitea.com/macaron/cors/cors.go
@@ -151,7 +151,8 @@ func CORS(options ...Options) macaron.Handler {
 			}
 		})
 		if reqOptions {
-			ctx.Status(200) // return response
+			ctx.Resp.WriteHeader(200) // return response
+			return
 		}
 	}
 }
diff --git a/vendor/gitea.com/macaron/gzip/.drone.yml b/vendor/gitea.com/macaron/gzip/.drone.yml
new file mode 100644
index 0000000000..e55afaac04
--- /dev/null
+++ b/vendor/gitea.com/macaron/gzip/.drone.yml
@@ -0,0 +1,24 @@
+kind: pipeline
+name: go1-1-2
+
+steps:
+- name: test
+  image: golang:1.12
+  environment:
+    GOPROXY: https://goproxy.cn
+  commands:
+  - go build -v
+  - go test -v -race -coverprofile=coverage.txt -covermode=atomic
+
+---
+kind: pipeline
+name: go1-1-3
+
+steps:
+- name: test
+  image: golang:1.13
+  environment:
+    GOPROXY: https://goproxy.cn
+  commands:
+  - go build -v
+  - go test -v -race -coverprofile=coverage.txt -covermode=atomic
\ No newline at end of file
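The cors.go hunk above is the one behavioral change in the vendored CORS middleware: after answering a preflight OPTIONS request it now writes the 200 status through ctx.Resp.WriteHeader and returns immediately, instead of falling through to the rest of the handler chain. A minimal sketch of the same pattern in plain net/http (the handler name and the Allow-* policy are illustrative, not macaron's API):

```go
package sketch

import "net/http"

// corsPreflight mirrors the fix above: once a preflight OPTIONS request has
// been answered with 200, the middleware must return so the request does not
// continue into the normal routing chain.
func corsPreflight(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" {
			w.Header().Set("Access-Control-Allow-Origin", "*") // illustrative policy only
			w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
			w.WriteHeader(http.StatusOK) // analogous to ctx.Resp.WriteHeader(200)
			return                       // the `return` this commit adds
		}
		next.ServeHTTP(w, r)
	})
}
```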
diff --git a/vendor/gitea.com/macaron/gzip/README.md b/vendor/gitea.com/macaron/gzip/README.md
new file mode 100644
index 0000000000..e09c4a9d8c
--- /dev/null
+++ b/vendor/gitea.com/macaron/gzip/README.md
@@ -0,0 +1,19 @@
+# gzip
+
+Middleware gzip provides gzip compress middleware for [Macaron](https://gitea.com/macaron/macaron).
+
+### Installation
+
+    go get gitea.com/macaron/gzip
+
+## Getting Help
+
+- [API Reference](https://godoc.org/gitea.com/macaron/gzip)
+
+## Credits
+
+This package is a modified version of [go-macaron gzip](github.com/go-macaron/gzip).
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.
\ No newline at end of file
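Following the README's install step, wiring the middleware into a Macaron app looks roughly like this (a sketch assuming the package exposes gzip.Middleware(), which is how Gitea registers it; confirm against the API reference above):

```go
package main

import (
	"gitea.com/macaron/gzip"
	"gitea.com/macaron/macaron"
)

func main() {
	m := macaron.Classic()
	// Assumed entry point; see the godoc link in the README above.
	m.Use(gzip.Middleware())
	m.Get("/", func() string {
		return "compressed when the client sends Accept-Encoding: gzip"
	})
	m.Run()
}
```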
diff --git a/vendor/gitea.com/macaron/session/go.mod b/vendor/gitea.com/macaron/session/go.mod
index 626199661d..e161b8e165 100644
--- a/vendor/gitea.com/macaron/session/go.mod
+++ b/vendor/gitea.com/macaron/session/go.mod
@@ -6,7 +6,7 @@ require (
 	gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb
 	github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668
 	github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d // indirect
-	github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b // indirect
+	github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 // indirect
 	github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7
 	github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
diff --git a/vendor/gitea.com/macaron/session/go.sum b/vendor/gitea.com/macaron/session/go.sum
index dc7dc6f34a..38d5f5f226 100644
--- a/vendor/gitea.com/macaron/session/go.sum
+++ b/vendor/gitea.com/macaron/session/go.sum
@@ -8,8 +8,8 @@ github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK
 github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
 github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d h1:XMf4E1U+b9E3ElF0mjvfXZdflBRZz4gLp16nQ/QSHQM=
 github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
-github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b h1:bZ9rKU2/V8sY+NulSfxDOnXTWcs1rySqdF1sVepihvo=
-github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
+github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 h1:0WMIDtuXCKEm4wtAJgAAXa/qtM5O9MariLwgHaRlYmk=
+github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
 github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7 h1:1XjEY/gnjQ+AfXef2U6dxCquhiRzkEpxZuWqs+QxTL8=
 github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7/go.mod h1:mby/05p8HE5yHEAKiIH/555NoblMs7PtW6NrYshDruc=
 github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 20c94f5968..2b101d26b2 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -48,6 +48,8 @@ const (
 	maxHashOffset = 1 << 24
 
 	skipNever = math.MaxInt32
+
+	debugDeflate = false
 )
 
 type compressionLevel struct {
@@ -59,15 +61,13 @@ type compressionLevel struct {
 // See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
 var levels = []compressionLevel{
 	{}, // 0
-	// Level 1-4 uses specialized algorithm - values not used
+	// Level 1-6 uses specialized algorithm - values not used
 	{0, 0, 0, 0, 0, 1},
 	{0, 0, 0, 0, 0, 2},
 	{0, 0, 0, 0, 0, 3},
 	{0, 0, 0, 0, 0, 4},
-	// For levels 5-6 we don't bother trying with lazy matches.
-	// Lazy matching is at least 30% slower, with 1.5% increase.
-	{6, 0, 12, 8, 12, 5},
-	{8, 0, 24, 16, 16, 6},
+	{0, 0, 0, 0, 0, 5},
+	{0, 0, 0, 0, 0, 6},
 	// Levels 7-9 use increasingly more lazy matching
 	// and increasingly stringent conditions for "good enough".
{8, 8, 24, 16, skipNever, 7}, @@ -203,9 +203,8 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { // This is much faster than doing a full encode. // Should only be used after a start/reset. func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only mode, - // use constant or Snappy compression. - if d.level == 0 { + // Do not fill window if we are in store-only or huffman mode. + if d.level <= 0 { return } if d.fast != nil { @@ -368,7 +367,7 @@ func (d *compressor) deflateLazy() { // Sanity enables additional runtime tests. // It's intended to be used during development // to supplement the currently ad-hoc unit tests. - const sanity = false + const sanity = debugDeflate if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { return @@ -644,7 +643,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) { d.fill = (*compressor).fillBlock d.step = (*compressor).store case level == ConstantCompression: - d.w.logReusePenalty = uint(4) + d.w.logNewTablePenalty = 4 d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeHuff @@ -652,13 +651,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) { level = 5 fallthrough case level >= 1 && level <= 6: - d.w.logReusePenalty = uint(level + 1) + d.w.logNewTablePenalty = 6 d.fast = newFastEnc(level) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeFast case 7 <= level && level <= 9: - d.w.logReusePenalty = uint(level) + d.w.logNewTablePenalty = 10 d.state = &advancedState{} d.compressionLevel = levels[level] d.initDeflate() @@ -667,6 +666,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) { default: return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) } + d.level = level return nil } @@ -720,6 +720,7 @@ func (d *compressor) close() error { return d.w.err } d.w.flush() + d.w.reset(nil) return d.w.err } @@ -750,8 +751,7 @@ func NewWriter(w io.Writer, level int) (*Writer, error) { // can only be decompressed by a Reader initialized with the // same dictionary. func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { - dw := &dictWriter{w} - zw, err := NewWriter(dw, level) + zw, err := NewWriter(w, level) if err != nil { return nil, err } @@ -760,14 +760,6 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { return zw, err } -type dictWriter struct { - w io.Writer -} - -func (w *dictWriter) Write(b []byte) (n int, err error) { - return w.w.Write(b) -} - // A Writer takes data written to it and writes the compressed // form of that data to an underlying writer (see NewWriter). type Writer struct { @@ -805,11 +797,12 @@ func (w *Writer) Close() error { // the result of NewWriter or NewWriterDict called with dst // and w's level and dictionary. 
func (w *Writer) Reset(dst io.Writer) { - if dw, ok := w.d.w.writer.(*dictWriter); ok { + if len(w.dict) > 0 { // w was created with NewWriterDict - dw.w = dst - w.d.reset(dw) - w.d.fillWindow(w.dict) + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } } else { // w was created with NewWriter w.d.reset(dst) diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index b0a470f92e..6d4c1e98bc 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -35,17 +35,17 @@ func newFastEnc(level int) fastEnc { } const ( - tableBits = 16 // Bits used in the table + tableBits = 15 // Bits used in the table tableSize = 1 << tableBits // Size of the table tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. baseMatchOffset = 1 // The smallest match offset baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 maxMatchOffset = 1 << 15 // The largest match offset - bTableBits = 18 // Bits used in the big tables - bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxMatchOffset * 10 // Size to preallocate for history. - bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this. + bTableBits = 17 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. ) const ( @@ -92,7 +92,6 @@ func hash(u uint32) uint32 { } type tableEntry struct { - val uint32 offset int32 } @@ -210,16 +209,14 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { // Reset the encoding table. func (e *fastGen) Reset() { - if cap(e.hist) < int(maxMatchOffset*8) { - l := maxMatchOffset * 8 - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 - } - e.hist = make([]byte, 0, l) + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. + if e.cur <= bufferReset { + e.cur += maxMatchOffset + int32(len(e.hist)) } - // We offset current position so everything will be out of reach - e.cur += maxMatchOffset + int32(len(e.hist)) e.hist = e.hist[:0] } diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go new file mode 100644 index 0000000000..c74a95fe7f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go @@ -0,0 +1,274 @@ +// +build generate + +//go:generate go run $GOFILE && gofmt -w inflate_gen.go + +package main + +import ( + "os" + "strings" +) + +func main() { + f, err := os.Create("inflate_gen.go") + if err != nil { + panic(err) + } + defer f.Close() + types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"} + names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"} + imports := []string{"bytes", "bufio", "io", "strings", "math/bits"} + f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT. 
+ +package flate + +import ( +`) + + for _, imp := range imports { + f.WriteString("\t\"" + imp + "\"\n") + } + f.WriteString(")\n\n") + + template := ` + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) $FUNCNAME$() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.($TYPE$) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).$FUNCNAME$ + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != 
nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).$FUNCNAME$ // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +` + for i, t := range types { + s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1) + s = strings.Replace(s, "$TYPE$", t, -1) + f.WriteString(s) + } + f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n") + f.WriteString("\tswitch f.r.(type) {\n") + for i, t := range types { + f.WriteString("\t\tcase " + t + ":\n") + f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n") + } + f.WriteString("\t\tdefault:\n") + f.WriteString("\t\t\treturn f.huffmanBlockGeneric") + f.WriteString("\t}\n}\n") +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 5ed476aa0d..53fe1d06e2 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -93,12 +93,12 @@ type huffmanBitWriter struct { err error lastHeader int // Set between 0 (reused block can be up to 2x the size) - logReusePenalty uint - lastHuffMan bool - bytes [256]byte - literalFreq [lengthCodesStart + 32]uint16 - offsetFreq [32]uint16 - codegenFreq [codegenCodeCount]uint16 + logNewTablePenalty uint + lastHuffMan bool + bytes [256]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 // codegen must have an extra space for the final symbol. codegen [literalCount + offsetCodeCount + 1]uint8 @@ -119,7 +119,7 @@ type huffmanBitWriter struct { // If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. // // An incoming block estimates the output size of a new table using a 'fresh' by calculating the -// optimal size and adding a penalty in 'logReusePenalty'. +// optimal size and adding a penalty in 'logNewTablePenalty'. // A Huffman table is not optimal, which is why we add a penalty, and generating a new table // is slower both for compression and decompression. 
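The comment block above describes how the bit writer chooses between reusing the previous Huffman table and emitting a fresh one. Condensed into a standalone sketch (function and parameter names are mine, simplified from the writeBlockDynamic change below):

```go
package sketch

// shouldWriteNewTable condenses the estimate described above: the
// optimal-size estimate for a fresh Huffman table is inflated by a penalty
// shift (logNewTablePenalty), since a real table is never optimal and costs
// extra header bits; the previous table is reused unless the penalized
// fresh-table estimate is still smaller than the reuse size.
func shouldWriteNewTable(estNewBits, reuseBits int, logNewTablePenalty uint) bool {
	newSize := estNewBits + estNewBits>>logNewTablePenalty
	return newSize < reuseBits
}
```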
@@ -135,7 +135,6 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { func (w *huffmanBitWriter) reset(writer io.Writer) { w.writer = writer w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.bytes = [256]byte{} w.lastHeader = 0 w.lastHuffMan = false } @@ -178,6 +177,11 @@ func (w *huffmanBitWriter) flush() { w.nbits = 0 return } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } n := w.nbytes for w.nbits != 0 { w.bytes[n] = byte(w.bits) @@ -351,6 +355,13 @@ func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { } // dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { + size = litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + return size +} + +// dynamicSize returns the size of dynamically encoded data in bits. func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { header, numCodegens := w.headerSize() size = header + @@ -452,30 +463,30 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n i := 0 for { - var codeWord int = int(w.codegen[i]) + var codeWord = uint32(w.codegen[i]) i++ if codeWord == badCode { break } - w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) + w.writeCode(w.codegenEncoding.codes[codeWord]) switch codeWord { case 16: w.writeBits(int32(w.codegen[i]), 2) i++ - break case 17: w.writeBits(int32(w.codegen[i]), 3) i++ - break case 18: w.writeBits(int32(w.codegen[i]), 7) i++ - break } } } +// writeStoredHeader will write a stored header. +// If the stored block is only used for EOF, +// it is replaced with a fixed huffman block. func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { if w.err != nil { return @@ -485,6 +496,16 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. + if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + var flag int32 if isEof { flag = 1 @@ -591,8 +612,8 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b tokens.AddEOB() } - // We cannot reuse pure huffman table. - if w.lastHuffMan && w.lastHeader > 0 { + // We cannot reuse pure huffman table, and must mark as EOF. + if (w.lastHuffMan || eof) && w.lastHeader > 0 { // We will not try to reuse. w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 @@ -606,14 +627,14 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b var size int // Check if we should reuse. if w.lastHeader > 0 { - // Estimate size for using a new table + // Estimate size for using a new table. + // Use the previous header size as the best estimate. newSize := w.lastHeader + tokens.EstimatedBits() + newSize += newSize >> w.logNewTablePenalty // The estimated size is calculated as an optimal table. // We add a penalty to make it more realistic and re-use a bit more. - newSize += newSize >> (w.logReusePenalty & 31) - extra := w.extraBitSize() - reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra) + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize() // Check if a new table is better. 
if newSize < reuseSize { @@ -805,21 +826,30 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { } // Add everything as literals - estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15 + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync) + estBits += w.lastHeader + 15 + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. ssize, storable := w.storedSize(input) - if storable && ssize < (estBits+estBits>>4) { + if storable && ssize < estBits { w.writeStoredHeader(len(input), eof) w.writeBytes(input) return } if w.lastHeader > 0 { - size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader) - estBits += estBits >> (w.logReusePenalty) + reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256]) + estBits += estExtra - if estBits < size { + if estBits < reuseSize { // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index d0099599c5..4c39a30187 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -7,7 +7,6 @@ package flate import ( "math" "math/bits" - "sort" ) const ( @@ -25,8 +24,6 @@ type huffmanEncoder struct { codes []hcode freqcache []literalNode bitCount [17]int32 - lns byLiteral // stored to avoid repeated allocation in generate - lfs byFreq // stored to avoid repeated allocation in generate } type literalNode struct { @@ -85,17 +82,14 @@ func generateFixedLiteralEncoding() *huffmanEncoder { // size 8, 000110000 .. 10111111 bits = ch + 48 size = 8 - break case ch < 256: // size 9, 110010000 .. 111111111 bits = ch + 400 - 144 size = 9 - break case ch < 280: // size 7, 0000000 .. 0010111 bits = ch - 256 size = 7 - break default: // size 8, 11000000 .. 11000111 bits = ch + 192 - 280 @@ -115,8 +109,8 @@ func generateFixedOffsetEncoding() *huffmanEncoder { return h } -var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() -var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() +var fixedLiteralEncoding = generateFixedLiteralEncoding() +var fixedOffsetEncoding = generateFixedOffsetEncoding() func (h *huffmanEncoder) bitLength(freq []uint16) int { var total int @@ -273,7 +267,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // assigned in literal order (not frequency order). 
chunk := list[len(list)-int(bits):] - h.lns.sort(chunk) + sortByLiteral(chunk) for _, node := range chunk { h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} code++ @@ -318,7 +312,7 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { } return } - h.lfs.sort(list) + sortByFreq(list) // Get the number of literals for each bit count bitCount := h.bitCounts(list, maxBits) @@ -326,59 +320,44 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { h.assignEncodingAndSize(bitCount, list) } -type byLiteral []literalNode - -func (s *byLiteral) sort(a []literalNode) { - *s = byLiteral(a) - sort.Sort(s) -} - -func (s byLiteral) Len() int { return len(s) } - -func (s byLiteral) Less(i, j int) bool { - return s[i].literal < s[j].literal -} - -func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type byFreq []literalNode - -func (s *byFreq) sort(a []literalNode) { - *s = byFreq(a) - sort.Sort(s) -} - -func (s byFreq) Len() int { return len(s) } - -func (s byFreq) Less(i, j int) bool { - if s[i].freq == s[j].freq { - return s[i].literal < s[j].literal +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 } - return s[i].freq < s[j].freq + return v } -func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // histogramSize accumulates a histogram of b in h. // An estimated size in bits is returned. // Unassigned values are assigned '1' in the histogram. // len(h) must be >= 256, and h's elements must be all zeroes. -func histogramSize(b []byte, h []uint16, fill bool) int { +func histogramSize(b []byte, h []uint16, fill bool) (int, int) { h = h[:256] for _, t := range b { h[t]++ } - invTotal := 1.0 / float64(len(b)) - shannon := 0.0 - single := math.Ceil(-math.Log2(invTotal)) - for i, v := range h[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } else if fill { - shannon += single - h[i] = 1 + invTotal := 1.0 / float32(len(b)) + shannon := float32(0.0) + var extra float32 + if fill { + oneBits := atLeastOne(-mFastLog2(invTotal)) + for i, v := range h[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } else { + h[i] = 1 + extra += oneBits + } + } + } else { + for _, v := range h[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } } } - return int(shannon + 0.99) + + return int(shannon + 0.99), int(extra + 0.99) } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go new file mode 100644 index 0000000000..2077802990 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -0,0 +1,178 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). 
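The histogramSize rewrite above swaps the float64 math.Log2 entropy estimate for a float32 fast-log version and reports the cost of unassigned symbols separately. The quantity being approximated is plain Shannon entropy; a minimal standalone version using the exact log (mFastLog2 replaced by math.Log2 for clarity):

```go
package sketch

import "math"

// entropyBits returns the Shannon lower bound, in bits, for encoding the
// bytes counted in hist: each symbol with count c contributes
// c * -log2(c/total) bits, which is what histogramSize approximates.
func entropyBits(hist []uint16) int {
	total := 0
	for _, c := range hist {
		total += int(c)
	}
	if total == 0 {
		return 0
	}
	invTotal := 1.0 / float64(total)
	bits := 0.0
	for _, c := range hist {
		if c > 0 {
			n := float64(c)
			bits += n * -math.Log2(n*invTotal)
		}
	}
	return int(math.Ceil(bits))
}
```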
+ if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +// siftDownByFreq implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDownByFreq(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { + child++ + } + if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. + +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go new file mode 100644 index 0000000000..93f1aea109 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
+func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 6dc5b5d06e..7f175a4ec2 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -106,7 +106,7 @@ const ( ) type huffmanDecoder struct { - min int // the minimum code length + maxRead int // the maximum number of bits we can read and not overread chunks *[huffmanNumChunks]uint16 // chunks as described above links [][]uint16 // overflow links linkMask uint32 // mask the width of the link table @@ -126,12 +126,12 @@ func (h *huffmanDecoder) init(lengths []int) bool { if h.chunks == nil { h.chunks = &[huffmanNumChunks]uint16{} } - if h.min != 0 { + if h.maxRead != 0 { *h = huffmanDecoder{chunks: h.chunks, links: h.links} } // Count number of codes of each length, - // compute min and max length. + // compute maxRead and max length. 
var count [maxCodeLen]int var min, max int for _, n := range lengths { @@ -178,7 +178,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { return false } - h.min = min + h.maxRead = min chunks := h.chunks[:] for i := range chunks { chunks[i] = 0 @@ -342,7 +342,7 @@ func (f *decompressor) nextBlock() { // compressed, fixed Huffman tables f.hl = &fixedHuffmanDecoder f.hd = nil - f.huffmanBlock() + f.huffmanBlockDecoder()() case 2: // compressed, dynamic Huffman tables if f.err = f.readHuffman(); f.err != nil { @@ -350,7 +350,7 @@ func (f *decompressor) nextBlock() { } f.hl = &f.h1 f.hd = &f.h2 - f.huffmanBlock() + f.huffmanBlockDecoder()() default: // 3 is reserved. if debugDecode { @@ -543,12 +543,18 @@ func (f *decompressor) readHuffman() error { return CorruptInputError(f.roffset) } - // As an optimization, we can initialize the min bits to read at a time + // As an optimization, we can initialize the maxRead bits to read at a time // for the HLIT tree to the length of the EOB marker since we know that // every block must terminate with one. This preserves the property that // we never read any extra bytes after the end of the DEFLATE stream. - if f.h1.min < f.bits[endBlockMarker] { - f.h1.min = f.bits[endBlockMarker] + if f.h1.maxRead < f.bits[endBlockMarker] { + f.h1.maxRead = f.bits[endBlockMarker] + } + if !f.final { + // If not the final block, the smallest block possible is + // a predefined table, BTYPE=01, with a single EOB marker. + // This will take up 3 + 7 bits. + f.h1.maxRead += 10 } return nil @@ -558,7 +564,7 @@ func (f *decompressor) readHuffman() error { // hl and hd are the Huffman states for the lit/length values // and the distance values, respectively. If hd == nil, using the // fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlock() { +func (f *decompressor) huffmanBlockGeneric() { const ( stateInit = iota // Zero value must be stateInit stateDict @@ -574,19 +580,64 @@ func (f *decompressor) huffmanBlock() { readLiteral: // Read literal and/or (length, distance) according to RFC section 3.2.3. { - v, err := f.huffSym(f.hl) - if err != nil { - f.err = err - return + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } } + var n uint // number of bits extra var length int + var err error switch { case v < 256: f.dict.writeByte(byte(v)) if f.dict.availWrite() == 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlock + f.step = (*decompressor).huffmanBlockGeneric f.stepState = stateInit return } @@ -714,7 +765,7 @@ copyHistory: if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlock // We need to continue this work + f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work f.stepState = stateDict return } @@ -726,21 +777,33 @@ copyHistory: func (f *decompressor) dataBlock() { // Uncompressed. // Discard current half-byte. - f.nb = 0 - f.b = 0 + left := (f.nb) & 7 + f.nb -= left + f.b >>= left + + offBytes := f.nb >> 3 + // Unfilled values will be overwritten. + f.buf[0] = uint8(f.b) + f.buf[1] = uint8(f.b >> 8) + f.buf[2] = uint8(f.b >> 16) + f.buf[3] = uint8(f.b >> 24) + + f.roffset += int64(offBytes) + f.nb, f.b = 0, 0 // Length then ones-complement of length. - nr, err := io.ReadFull(f.r, f.buf[0:4]) + nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) f.roffset += int64(nr) if err != nil { f.err = noEOF(err) return } - n := int(f.buf[0]) | int(f.buf[1])<<8 - nn := int(f.buf[2]) | int(f.buf[3])<<8 - if uint16(nn) != uint16(^n) { + n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 + nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 + if nn != ^n { if debugDecode { - fmt.Println("uint16(nn) != uint16(^n)", nn, ^n) + ncomp := ^n + fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) } f.err = CorruptInputError(f.roffset) return @@ -752,7 +815,7 @@ func (f *decompressor) dataBlock() { return } - f.copyLen = n + f.copyLen = int(n) f.copyData() } @@ -816,7 +879,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { // with single element, huffSym must error on these two edge cases. In both // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. - n := uint(h.min) + n := uint(h.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go new file mode 100644 index 0000000000..397dc1b1a1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -0,0 +1,922 @@ +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. 
If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesBuffer + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Reader) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesReader + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesReader // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Decode a single Huffman block from f. 
+// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBufioReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bufio.Reader) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBufioReader + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := 
uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBufioReader // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanStringsReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*strings.Reader) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. 
+ { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +func (f *decompressor) huffmanBlockDecoder() func() { + switch f.r.(type) { + case *bytes.Buffer: + return f.huffmanBytesBuffer + case *bytes.Reader: + return f.huffmanBytesReader + case *bufio.Reader: + return f.huffmanBufioReader + case *strings.Reader: + return f.huffmanStringsReader + default: + return f.huffmanBlockGeneric + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 20de8f11f4..1e5eea3968 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,5 +1,7 @@ package flate +import "fmt" + // fastGen maintains the table for matches, // and the previous byte block for level 2. // This is the generic implementation. @@ -14,6 +16,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } // Protect against e.cur wraparound. for e.cur >= bufferReset { @@ -76,12 +81,12 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { } now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} nextHash = hash(uint32(now)) offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { - e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -91,11 +96,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { nextS++ candidate = e.table[nextHash] now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { - e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } cv = uint32(now) @@ -134,7 +139,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Index first pair after match end. 
if int(s+l+4) < len(src) { cv := load3232(src, s) - e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv} + e.table[hash(cv)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -148,14 +153,14 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { x := load6432(src, s-2) o := e.cur + s - 2 prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHash] = tableEntry{offset: o} x >>= 16 currHash := hash(uint32(x)) candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)} + e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != candidate.val { + if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { cv = uint32(x >> 8) s++ break diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 7c824431e6..5b986a1944 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -1,5 +1,7 @@ package flate +import "fmt" + // fastGen maintains the table for matches, // and the previous byte block for level 2. // This is the generic implementation. @@ -16,6 +18,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. for e.cur >= bufferReset { if len(e.hist) == 0 { @@ -77,12 +83,12 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { } candidate = e.table[nextHash] now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} nextHash = hash4u(uint32(now), bTableBits) offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { - e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -92,10 +98,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { nextS++ candidate = e.table[nextHash] now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { break } cv = uint32(now) @@ -142,7 +148,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Index first pair after match end. 
if int(s+l+4) < len(src) { cv := load3232(src, s) - e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv} + e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -151,15 +157,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { for i := s - l + 2; i < s-5; i += 7 { x := load6432(src, int32(i)) nextHash := hash4u(uint32(x), bTableBits) - e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)} + e.table[nextHash] = tableEntry{offset: e.cur + i} // Skip one x >>= 16 nextHash = hash4u(uint32(x), bTableBits) - e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)} + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} // Skip one x >>= 16 nextHash = hash4u(uint32(x), bTableBits) - e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)} + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} } // We could immediately start working at s now, but to improve @@ -172,14 +178,14 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { o := e.cur + s - 2 prevHash := hash4u(uint32(x), bTableBits) prevHash2 := hash4u(uint32(x>>8), bTableBits) - e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} - e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)} + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} currHash := hash4u(uint32(x>>16), bTableBits) candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)} + e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x>>16) != candidate.val { + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { cv = uint32(x >> 24) s++ break diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index 4153d24c95..c22b4244a5 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -1,5 +1,7 @@ package flate +import "fmt" + // fastEncL3 type fastEncL3 struct { fastGen @@ -13,6 +15,10 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. for e.cur >= bufferReset { if len(e.hist) == 0 { @@ -75,22 +81,26 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { } candidates := e.table[nextHash] now := load3232(src, nextS) - e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} // Check both candidates candidate = candidates.Cur - offset := s - (candidate.offset - e.cur) - if cv == candidate.val { - if offset > maxMatchOffset { - cv = now - // Previous will also be invalid, we have nothing. - continue - } - o2 := s - (candidates.Prev.offset - e.cur) - if cv != candidates.Prev.val || o2 > maxMatchOffset { + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + + if cv == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) { break } // Both match and are valid, pick longest. 
+ offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) if l2 > l1 { candidate = candidates.Prev @@ -100,11 +110,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // We only check if value mismatches. // Offset will always be invalid in other cases. candidate = candidates.Prev - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - break - } + if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { + break } } cv = now @@ -152,7 +159,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { nextHash := hash(cv) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + t, val: cv}, + Cur: tableEntry{offset: e.cur + t}, } } goto emitRemainder @@ -164,21 +171,21 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { prevHash := hash(uint32(x)) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + Cur: tableEntry{offset: e.cur + s - 3}, } x >>= 8 prevHash = hash(uint32(x)) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 prevHash = hash(uint32(x)) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 currHash := hash(uint32(x)) @@ -186,21 +193,18 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { cv = uint32(x) e.table[currHash] = tableEntryPrev{ Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur, val: cv}, + Cur: tableEntry{offset: s + e.cur}, } // Check both candidates candidate = candidates.Cur - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } - } else { + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { // We only check if value mismatches. // Offset will always be invalid in other cases. candidate = candidates.Prev - if cv == candidate.val { + if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { offset := s - (candidate.offset - e.cur) if offset <= maxMatchOffset { continue diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index c689ac771b..e62f0c02b1 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -13,7 +13,9 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } // Protect against e.cur wraparound. 
for e.cur >= bufferReset { if len(e.hist) == 0 { @@ -90,24 +92,24 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { sCandidate := e.table[nextHashS] lCandidate := e.bTable[nextHashL] next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + entry := tableEntry{offset: s + e.cur} e.table[nextHashS] = entry e.bTable[nextHashL] = entry t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == lCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { // We got a long match. Use that. break } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { // Found a 4 match... lCandidate = e.bTable[hash7(next, tableBits)] // If the next long is a candidate, check if we should use that instead... lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && lCandidate.val == uint32(next) { + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) if l2 > l1 { s = nextS @@ -135,7 +137,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { if nextEmit < s { emitLiteral(dst, src[nextEmit:s]) } - if false { + if debugDeflate { if t >= s { panic("s-t") } @@ -158,8 +160,8 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // Index first pair after match end. if int(s+8) < len(src) { cv := load6432(src, s) - e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -169,20 +171,20 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { i := nextS if i < s-1 { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} - t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(t2.val, tableBits)] = t2 + e.table[hash4u(uint32(cv>>8), tableBits)] = t2 i += 3 for ; i < s-1; i += 3 { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} - t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(t2.val, tableBits)] = t2 + e.table[hash4u(uint32(cv>>8), tableBits)] = t2 } } } @@ -193,8 +195,8 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { o := e.cur + s - 1 prevHashS := hash4x64(x, tableBits) prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} - e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} cv = x >> 8 } diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 14a2356126..d513f1ffd3 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -13,6 +13,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 
minNonLiteralBlockSize = 1 + 1 + inputMargin ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } // Protect against e.cur wraparound. for e.cur >= bufferReset { @@ -97,7 +100,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { sCandidate := e.table[nextHashS] lCandidate := e.bTable[nextHashL] next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + entry := tableEntry{offset: s + e.cur} e.table[nextHashS] = entry eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = entry, eLong.Cur @@ -107,14 +110,14 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == lCandidate.Cur.val { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { l = e.matchlen(s+4, t+4, src) + 4 ml1 := e.matchlen(s+4, t2+4, src) + 4 if ml1 > l { @@ -126,30 +129,30 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur break } } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { // Found a 4 match... l = e.matchlen(s+4, t+4, src) + 4 lCandidate = e.bTable[nextHashL] // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if lCandidate.Cur.val == uint32(next) { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -160,7 +163,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... 
t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -194,7 +197,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { if nextEmit < s { emitLiteral(dst, src[nextEmit:s]) } - if false { + if debugDeflate { if t >= s { panic(fmt.Sprintln("s-t", s, t)) } @@ -223,31 +226,31 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { i := s - l + 1 if i < s-1 { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t := tableEntry{offset: i + e.cur} e.table[hash4x64(cv, tableBits)] = t eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur // Do an long at i+1 cv >>= 8 - t = tableEntry{offset: t.offset + 1, val: uint32(cv)} + t = tableEntry{offset: t.offset + 1} eLong = &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur // We only have enough bits for a short entry at i+2 cv >>= 8 - t = tableEntry{offset: t.offset + 1, val: uint32(cv)} + t = tableEntry{offset: t.offset + 1} e.table[hash4x64(cv, tableBits)] = t // Skip one - otherwise we risk hitting 's' i += 4 for ; i < s-1; i += hashEvery { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} - t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hash4u(t2.val, tableBits)] = t2 + e.table[hash4u(uint32(cv>>8), tableBits)] = t2 } } } @@ -258,9 +261,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { o := e.cur + s - 1 prevHashS := hash4x64(x, tableBits) prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHashS] = tableEntry{offset: o} eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o, val: uint32(x)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur cv = x >> 8 } diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index cad0c7df7f..a52c80ea45 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -13,6 +13,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } // Protect against e.cur wraparound. for e.cur >= bufferReset { @@ -98,7 +101,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { sCandidate := e.table[nextHashS] lCandidate := e.bTable[nextHashL] next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + entry := tableEntry{offset: s + e.cur} e.table[nextHashS] = entry eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = entry, eLong.Cur @@ -109,17 +112,17 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == lCandidate.Cur.val { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { // Long candidate matches at least 4 bytes. 
// Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur // Check the previous long candidate as well. t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { l = e.matchlen(s+4, t+4, src) + 4 ml1 := e.matchlen(s+4, t2+4, src) + 4 if ml1 > l { @@ -132,17 +135,17 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // Current value did not match, but check if previous long value does. t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur break } } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { // Found a 4 match... l = e.matchlen(s+4, t+4, src) + 4 @@ -150,9 +153,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { lCandidate = e.bTable[nextHashL] // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur // Check repeat at s + repOff const repOff = 1 @@ -171,7 +174,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 = lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if lCandidate.Cur.val == uint32(next) { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -182,7 +185,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -241,9 +244,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Index after match end. 
for i := nextS + 1; i < int32(len(src))-8; i += 2 {
 cv := load6432(src, i)
- e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur, val: uint32(cv)}
+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur}
 eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur, val: uint32(cv)}, eLong.Cur
+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
 }
 goto emitRemainder
 }
@@ -252,8 +255,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 if true {
 for i := nextS + 1; i < s-1; i += 2 {
 cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur, val: uint32(cv)}
- t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)}
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
 eLong := &e.bTable[hash7(cv, tableBits)]
 eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
 e.table[hash4x64(cv, tableBits)] = t
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index 524ee0ae37..53e8991246 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -3,10 +3,13 @@ package flate
 import (
 "io"
 "math"
+ "sync"
 )
 const (
 maxStatelessBlock = math.MaxInt16
+ // dictionary will be taken from maxStatelessBlock, so limit it.
+ maxStatelessDict = 8 << 10
 slTableBits = 13
 slTableSize = 1 << slTableBits
@@ -24,11 +27,11 @@ func (s *statelessWriter) Close() error {
 }
 s.closed = true
 // Emit EOF block
- return StatelessDeflate(s.dst, nil, true)
+ return StatelessDeflate(s.dst, nil, true, nil)
 }
 func (s *statelessWriter) Write(p []byte) (n int, err error) {
- err = StatelessDeflate(s.dst, p, false)
+ err = StatelessDeflate(s.dst, p, false, nil)
 if err != nil {
 return 0, err
 }
@@ -49,11 +52,27 @@ func NewStatelessWriter(dst io.Writer) io.WriteCloser {
 return &statelessWriter{dst: dst}
 }
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+ New: func() interface{} {
+ return newHuffmanBitWriter(nil)
+ },
+}
+
 // StatelessDeflate allows compressing directly to a Writer without retaining state.
 // When it returns, everything will have been flushed.
-func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
+// Up to 8KB of an optional dictionary can be given, which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending a nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
 var dst tokens
- bw := newHuffmanBitWriter(out)
+ bw := bitWriterPool.Get().(*huffmanBitWriter)
+ bw.reset(out)
+ defer func() {
+ // don't keep a reference to our output
+ bw.reset(nil)
+ bitWriterPool.Put(bw)
+ }()
 if eof && len(in) == 0 {
 // Just write an EOF block.
 // Could be faster...
@@ -62,35 +81,53 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool) error { return bw.err } + // Truncate dict + if len(dict) > maxStatelessDict { + dict = dict[len(dict)-maxStatelessDict:] + } + for len(in) > 0 { todo := in - if len(todo) > maxStatelessBlock { - todo = todo[:maxStatelessBlock] + if len(todo) > maxStatelessBlock-len(dict) { + todo = todo[:maxStatelessBlock-len(dict)] } in = in[len(todo):] + uncompressed := todo + if len(dict) > 0 { + // combine dict and source + bufLen := len(todo) + len(dict) + combined := make([]byte, bufLen) + copy(combined, dict) + copy(combined[len(dict):], todo) + todo = combined + } // Compress - statelessEnc(&dst, todo) + statelessEnc(&dst, todo, int16(len(dict))) isEof := eof && len(in) == 0 if dst.n == 0 { - bw.writeStoredHeader(len(todo), isEof) + bw.writeStoredHeader(len(uncompressed), isEof) if bw.err != nil { return bw.err } - bw.writeBytes(todo) - } else if int(dst.n) > len(todo)-len(todo)>>4 { + bw.writeBytes(uncompressed) + } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { // If we removed less than 1/16th, huffman compress the block. - bw.writeBlockHuff(isEof, todo, false) + bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) } else { - bw.writeBlockDynamic(&dst, isEof, todo, false) + bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + } + if len(in) > 0 { + // Retain a dict if we have more + dict = todo[len(todo)-maxStatelessDict:] + dst.Reset() } if bw.err != nil { return bw.err } - dst.Reset() } if !eof { - // Align. + // Align, only a stored block can do that. bw.writeStoredHeader(0, false) } bw.flush() @@ -116,7 +153,7 @@ func load6416(b []byte, i int16) uint64 { uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 } -func statelessEnc(dst *tokens, src []byte) { +func statelessEnc(dst *tokens, src []byte, startAt int16) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin @@ -130,15 +167,23 @@ func statelessEnc(dst *tokens, src []byte) { // This check isn't in the Snappy implementation, but there, the caller // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { + if len(src)-int(startAt) < minNonLiteralBlockSize { // We do not fill the token table. // This will be picked up by caller. - dst.n = uint16(len(src)) + dst.n = 0 return } + // Index until startAt + if startAt > 0 { + cv := load3232(src, 0) + for i := int16(0); i < startAt; i++ { + table[hashSL(cv)] = tableEntry{offset: i} + cv = (cv >> 8) | (uint32(src[i+4]) << 24) + } + } - s := int16(1) - nextEmit := int16(0) + s := startAt + 1 + nextEmit := startAt // sLimit is when to stop looking for offset/length copies. The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are // looking for copies. 
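The level1 through level6 hunks all apply one pattern: tableEntry drops its cached val field, and candidate verification instead re-reads the four hashed bytes from src via load3232(src, candidate.offset-e.cur). Entries shrink from 8 to 4 bytes per slot at the cost of one extra load per probe. A minimal sketch of the idea, reusing the vendored helper names (tableEntry, load3232, maxMatchOffset) with the surrounding encoder state elided:

package main

import "fmt"

type tableEntry struct{ offset int32 } // the cached val field is removed by this update

// load3232 reads four little-endian bytes at src[i], as the flate helper does.
func load3232(src []byte, i int32) uint32 {
	return uint32(src[i]) | uint32(src[i+1])<<8 | uint32(src[i+2])<<16 | uint32(src[i+3])<<24
}

// candidateMatches mirrors the replaced `cv == candidate.val` test:
// the candidate is usable only if it is in range and the bytes at its
// recorded offset still equal the hashed input cv.
func candidateMatches(src []byte, s, cur int32, cand tableEntry, cv uint32, maxMatchOffset int32) bool {
	offset := s - (cand.offset - cur)
	return offset < maxMatchOffset && cv == load3232(src, cand.offset-cur)
}

func main() {
	src := []byte("abcdabcd")
	cand := tableEntry{offset: 0} // earlier occurrence of "abcd"
	cv := load3232(src, 4)        // the current position also reads "abcd"
	fmt.Println(candidateMatches(src, 4, 0, cand, cv, 1<<15)) // true
}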
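The stateless.go hunks add an optional dictionary to StatelessDeflate and pool the bit writers. A hedged usage sketch against the new four-argument signature; the payloads are invented, nil stays a valid dictionary, and anything past 8KB is truncated from the front:

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// Data the receiver is assumed to already hold, and the new block
	// to compress relative to it.
	dict := []byte("a shared prefix both sides already know")
	in := []byte("a shared prefix both sides already know, plus fresh data")

	// eof=true finishes the stream in one call; no state is retained.
	if err := flate.StatelessDeflate(&buf, in, true, dict); err != nil {
		log.Fatal(err)
	}

	// Decompression needs the same dictionary, e.g. via
	// flate.NewReaderDict(&buf, dict).
	log.Printf("compressed %d -> %d bytes", len(in), buf.Len())
}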
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index b3df0d8941..f9abf606d6 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -184,9 +184,7 @@ func (t *tokens) indexTokens(in []token) { t.Reset() for _, tok := range in { if tok < matchType { - t.tokens[t.n] = tok - t.litHist[tok]++ - t.n++ + t.AddLiteral(tok.literal()) continue } t.AddMatch(uint32(tok.length()), tok.offset()) @@ -211,50 +209,60 @@ func (t *tokens) AddLiteral(lit byte) { t.nLits++ } +// from https://stackoverflow.com/a/28730362 +func mFastLog2(val float32) float32 { + ux := int32(math.Float32bits(val)) + log2 := (float32)(((ux >> 23) & 255) - 128) + ux &= -0x7f800001 + ux += 127 << 23 + uval := math.Float32frombits(uint32(ux)) + log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 + return log2 +} + // EstimatedBits will return an minimum size estimated by an *optimal* // compression of the block. // The size of the block func (t *tokens) EstimatedBits() int { - shannon := float64(0) + shannon := float32(0) bits := int(0) nMatches := 0 if t.nLits > 0 { - invTotal := 1.0 / float64(t.nLits) + invTotal := 1.0 / float32(t.nLits) for _, v := range t.litHist[:] { if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) + n := float32(v) + shannon += -mFastLog2(n*invTotal) * n } } // Just add 15 for EOB shannon += 15 - for _, v := range t.extraHist[1 : literalCount-256] { + for i, v := range t.extraHist[1 : literalCount-256] { if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - bits += int(lengthExtraBits[v&31]) * int(v) + n := float32(v) + shannon += -mFastLog2(n*invTotal) * n + bits += int(lengthExtraBits[i&31]) * int(v) nMatches += int(v) } } } if nMatches > 0 { - invTotal := 1.0 / float64(nMatches) - for _, v := range t.offHist[:offsetCodeCount] { + invTotal := 1.0 / float32(nMatches) + for i, v := range t.offHist[:offsetCodeCount] { if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - bits += int(offsetExtraBits[v&31]) * int(n) + n := float32(v) + shannon += -mFastLog2(n*invTotal) * n + bits += int(offsetExtraBits[i&31]) * int(v) } } } - return int(shannon) + bits } // AddMatch adds a match to the tokens. // This function is very sensitive to inlining and right on the border. func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { - if debugDecode { + if debugDeflate { if xlength >= maxMatchLength+baseMatchLength { panic(fmt.Errorf("invalid length: %v", xlength)) } @@ -273,7 +281,7 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { // AddMatchLong adds a match to the tokens, potentially longer than max match length. // Length should NOT have the base subtracted, only offset should. 
func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { - if debugDecode { + if debugDeflate { if xoffset >= maxMatchOffset+baseMatchOffset { panic(fmt.Errorf("invalid offset: %v", xoffset)) } diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go index ed0cc148f8..6794cf48f4 100644 --- a/vendor/github.com/klauspost/compress/gzip/gzip.go +++ b/vendor/github.com/klauspost/compress/gzip/gzip.go @@ -207,7 +207,7 @@ func (z *Writer) Write(p []byte) (int, error) { z.size += uint32(len(p)) z.digest = crc32.Update(z.digest, crc32.IEEETable, p) if z.level == StatelessCompression { - return len(p), flate.StatelessDeflate(z.w, p, false) + return len(p), flate.StatelessDeflate(z.w, p, false, nil) } n, z.err = z.compressor.Write(p) return n, z.err @@ -255,7 +255,7 @@ func (z *Writer) Close() error { } } if z.level == StatelessCompression { - z.err = flate.StatelessDeflate(z.w, nil, true) + z.err = flate.StatelessDeflate(z.w, nil, true, nil) } else { z.err = z.compressor.Close() } diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index 87f1e369cc..b799e440b4 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.11 -// +build !gccgo,!appengine +// +build go1.11,!gccgo,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index b3a16ef751..891481539a 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.11 -// +build !gccgo,!appengine +// +build go1.11,!gccgo,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go index 098ec9f6be..7c498e90d9 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -136,6 +136,33 @@ func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { return a, b, c, d } +// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will +// behave as if (64 * counter) bytes had been encrypted so far. +// +// To prevent accidental counter reuse, SetCounter panics if counter is +// less than the current value. +func (s *Cipher) SetCounter(counter uint32) { + // Internally, s may buffer multiple blocks, which complicates this + // implementation slightly. When checking whether the counter has rolled + // back, we must use both s.counter and s.len to determine how many blocks + // we have already output. + outputCounter := s.counter - uint32(s.len)/blockSize + if counter < outputCounter { + panic("chacha20: SetCounter attempted to rollback counter") + } + + // In the general case, we set the new counter value and reset s.len to 0, + // causing the next call to XORKeyStream to refill the buffer. However, if + // we're advancing within the existing buffer, we can save work by simply + // setting s.len. 
+ if counter < s.counter { + s.len = int(s.counter-counter) * blockSize + } else { + s.counter = counter + s.len = 0 + } +} + // XORKeyStream XORs each byte in the given slice with a byte from the // cipher's key stream. Dst and src must overlap entirely or not at all. // diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index ec609ed868..4635307b8f 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo appengine +// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go index d0ec61f08d..b799330341 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo,!appengine +// +build !gccgo,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 533014ea3e..23c6021643 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -19,7 +19,7 @@ // The differences in this and the original implementation are // due to the calling conventions and initialization of constants. -// +build !gccgo,!appengine +// +build !gccgo,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index cd55f45a33..a9244bdf4d 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo,!appengine +// +build !gccgo,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s index de52a2ea8d..89c658c410 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo,!appengine +// +build !gccgo,!purego #include "go_asm.h" #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go index a8dd589ae3..b0c2cd0561 100644 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !amd64,!ppc64le gccgo appengine +// +build !amd64,!ppc64le gccgo purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go index df56a652ff..35b9e38c90 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build !gccgo,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s index 8c0cefbb3c..8d394a212e 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build !gccgo,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go index 32a9cef6bb..2e3ae34c7d 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl +// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go index 3233616935..92597bb8c2 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build ppc64le,!gccgo,!appengine +// +build !gccgo,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s index 4e20bf299a..4e02813879 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build ppc64le,!gccgo,!appengine +// +build !gccgo,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go index a8920ee9d2..5f91ff84a9 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x,go1.11,!gccgo,!appengine +// +build go1.11,!gccgo,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s index ca5a309d86..806d1694b0 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index d63cbf60b7..06f537c135 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -562,9 +562,11 @@ func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
 		return nil, nil, err
 	}

-	key := ed25519.PublicKey(w.KeyBytes)
+	if l := len(w.KeyBytes); l != ed25519.PublicKeySize {
+		return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l)
+	}

-	return (ed25519PublicKey)(key), w.Rest, nil
+	return ed25519PublicKey(w.KeyBytes), w.Rest, nil
 }

 func (k ed25519PublicKey) Marshal() []byte {
@@ -582,9 +584,11 @@ func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error {
 	if sig.Format != k.Type() {
 		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
 	}
+	if l := len(k); l != ed25519.PublicKeySize {
+		return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l)
+	}

-	edKey := (ed25519.PublicKey)(k)
-	if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
+	if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok {
 		return errors.New("ssh: signature did not verify")
 	}

@@ -838,6 +842,10 @@ func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) {
 		return nil, nil, err
 	}

+	if l := len(w.KeyBytes); l != ed25519.PublicKeySize {
+		return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l)
+	}
+
 	key := new(skEd25519PublicKey)
 	key.application = w.Application
 	key.PublicKey = ed25519.PublicKey(w.KeyBytes)
@@ -862,6 +870,9 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
 	if sig.Format != k.Type() {
 		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
 	}
+	if l := len(k.PublicKey); l != ed25519.PublicKeySize {
+		return fmt.Errorf("invalid size %d for Ed25519 public key", l)
+	}

 	h := sha256.New()
 	h.Write([]byte(k.application))
@@ -898,8 +909,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {

 	original := Marshal(blob)

-	edKey := (ed25519.PublicKey)(k.PublicKey)
-	if ok := ed25519.Verify(edKey, original, edSig.Signature); !ok {
+	if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok {
 		return errors.New("ssh: signature did not verify")
 	}

@@ -1051,7 +1061,10 @@ func NewPublicKey(key interface{}) (PublicKey, error) {
 	case *dsa.PublicKey:
 		return (*dsaPublicKey)(key), nil
 	case ed25519.PublicKey:
-		return (ed25519PublicKey)(key), nil
+		if l := len(key); l != ed25519.PublicKeySize {
+			return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l)
+		}
+		return ed25519PublicKey(key), nil
 	default:
 		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
 	}
@@ -1304,7 +1317,6 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
 		return nil, errors.New("ssh: malformed OpenSSH key")
 	}

-	// we only handle ed25519 and rsa keys currently
 	switch pk1.Keytype {
 	case KeyAlgoRSA:
 		// https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773
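The keys.go hunks above all enforce one invariant: raw Ed25519 key material must be exactly ed25519.PublicKeySize (32) bytes before it is converted or handed to ed25519.Verify, so a truncated or oversized blob fails fast with a clear error instead of misbehaving downstream. A standalone sketch of the added check using the standard library (checkEd25519 is a hypothetical helper, not part of the patched package):

package main

import (
	"crypto/ed25519"
	"fmt"
)

// checkEd25519 mirrors the validation added in this diff: reject raw key
// bytes whose length is not ed25519.PublicKeySize before using them.
func checkEd25519(raw []byte) (ed25519.PublicKey, error) {
	if l := len(raw); l != ed25519.PublicKeySize {
		return nil, fmt.Errorf("invalid size %d for Ed25519 public key", l)
	}
	return ed25519.PublicKey(raw), nil
}

func main() {
	if _, err := checkEd25519(make([]byte, 31)); err != nil {
		fmt.Println(err) // invalid size 31 for Ed25519 public key
	}
}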
diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go
index 428b71e34a..fe913a0fd8 100644
--- a/vendor/gopkg.in/ini.v1/ini.go
+++ b/vendor/gopkg.in/ini.v1/ini.go
@@ -29,7 +29,7 @@ const (
 	// Maximum allowed depth when recursively substituing variable names.
 	depthValues = 99

-	version = "1.51.1"
+	version = "1.52.0"
 )

 // Version returns current package version literal.
diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go
index 53ab45c46f..c0a93a62a6 100644
--- a/vendor/gopkg.in/ini.v1/parser.go
+++ b/vendor/gopkg.in/ini.v1/parser.go
@@ -453,7 +453,7 @@ func (f *File) parse(reader io.Reader) (err error) {

 			section.Comment = strings.TrimSpace(p.comment.String())

-			// Reset aotu-counter and comments
+			// Reset auto-counter and comments
 			p.comment.Reset()
 			p.count = 1
diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go
index d1cd2722fe..a0c3ad5a4e 100644
--- a/vendor/gopkg.in/ini.v1/struct.go
+++ b/vendor/gopkg.in/ini.v1/struct.go
@@ -305,14 +305,13 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error {

 		if isAnonymous || isStruct || isStructPtr {
 			if sec, err := s.f.GetSection(fieldName); err == nil {
-				// Only set the field to non-nil struct value if we have
-				// a section for it. Otherwise, we end up with a non-nil
-				// struct ptr even though there is no data.
+				// Only set the field to non-nil struct value if we have a section for it.
+				// Otherwise, we end up with a non-nil struct ptr even though there is no data.
 				if isStructPtr && field.IsNil() {
 					field.Set(reflect.New(tpField.Type.Elem()))
 				}
 				if err = sec.mapTo(field, isStrict); err != nil {
-					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+					return fmt.Errorf("error mapping field %q: %v", fieldName, err)
 				}
 				continue
 			}
@@ -320,7 +319,7 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
 		if key, err := s.GetKey(fieldName); err == nil {
 			delim := parseDelim(tpField.Tag.Get("delim"))
 			if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
-				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+				return fmt.Errorf("error mapping field %q: %v", fieldName, err)
 			}
 		}
 	}
@@ -512,6 +511,11 @@ func isEmptyValue(v reflect.Value) bool {
 	return false
 }

+// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
+type StructReflector interface {
+	ReflectINIStruct(*File) error
+}
+
 func (s *Section) reflectFrom(val reflect.Value) error {
 	if val.Kind() == reflect.Ptr {
 		val = val.Elem()
@@ -532,6 +536,10 @@ func (s *Section) reflectFrom(val reflect.Value) error {
 			continue
 		}

+		if r, ok := field.Interface().(StructReflector); ok {
+			return r.ReflectINIStruct(s.f)
+		}
+
 		fieldName := s.parseFieldName(tpField.Name, rawName)
 		if len(fieldName) == 0 || !field.CanSet() {
 			continue
 		}
@@ -552,12 +560,11 @@ func (s *Section) reflectFrom(val reflect.Value) error {
 			}

 			if err = sec.reflectFrom(field); err != nil {
-				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+				return fmt.Errorf("error reflecting field %q: %v", fieldName, err)
 			}
 			continue
 		}

-		// Note: Same reason as secion.
 		key, err := s.GetKey(fieldName)
 		if err != nil {
 			key, _ = s.NewKey(fieldName, "")
@@ -568,8 +575,9 @@ func (s *Section) reflectFrom(val reflect.Value) error {
 			key.Comment = tpField.Tag.Get("comment")
 		}

-		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim")), allowShadow); err != nil {
-			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		delim := parseDelim(tpField.Tag.Get("delim"))
+		if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+			return fmt.Errorf("error reflecting field %q: %v", fieldName, err)
 		}
 	}
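The StructReflector interface added in ini v1.52.0 (shown in the struct.go hunk above) lets a field type take over its own serialization: during reflectFrom, a field whose type implements ReflectINIStruct(*File) is handed the whole file instead of being walked key-by-key. A minimal sketch against that API; the Server type and the "server"/"endpoint" names are invented for illustration:

package main

import (
	"fmt"
	"os"

	"gopkg.in/ini.v1"
)

// Server writes itself as a single combined key rather than one key per field.
type Server struct {
	Host string
	Port int
}

// ReflectINIStruct makes Server satisfy ini.StructReflector.
func (s Server) ReflectINIStruct(f *ini.File) error {
	f.Section("server").Key("endpoint").SetValue(fmt.Sprintf("%s:%d", s.Host, s.Port))
	return nil
}

func main() {
	cfg := struct{ Server Server }{Server{Host: "localhost", Port: 3000}}
	f := ini.Empty()
	if err := ini.ReflectFrom(f, &cfg); err != nil {
		panic(err)
	}
	f.WriteTo(os.Stdout) // prints: [server] / endpoint = localhost:3000
}

Note the design choice visible in the diff: once a StructReflector field is found, reflectFrom returns immediately, so the custom reflector is responsible for everything it wants written.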
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7c3f1399db..7e84416cdb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -10,11 +10,11 @@ gitea.com/macaron/cache/memcache
 gitea.com/macaron/cache/redis
 # gitea.com/macaron/captcha v0.0.0-20190822015246-daa973478bae
 gitea.com/macaron/captcha
-# gitea.com/macaron/cors v0.0.0-20190821152825-7dcef4a17175
+# gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4
 gitea.com/macaron/cors
 # gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439
 gitea.com/macaron/csrf
-# gitea.com/macaron/gzip v0.0.0-20191118033930-0c4c5566a0e5
+# gitea.com/macaron/gzip v0.0.0-20191118041502-506895b47aae
 gitea.com/macaron/gzip
 # gitea.com/macaron/i18n v0.0.0-20190822004228-474e714e2223
 gitea.com/macaron/i18n
@@ -22,7 +22,7 @@ gitea.com/macaron/i18n
 gitea.com/macaron/inject
 # gitea.com/macaron/macaron v1.4.0
 gitea.com/macaron/macaron
-# gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705
+# gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d
 gitea.com/macaron/session
 gitea.com/macaron/session/couchbase
 gitea.com/macaron/session/memcache
@@ -269,7 +269,7 @@ github.com/keybase/go-crypto/openpgp/errors
 github.com/keybase/go-crypto/openpgp/packet
 github.com/keybase/go-crypto/openpgp/s2k
 github.com/keybase/go-crypto/rsa
-# github.com/klauspost/compress v1.9.2
+# github.com/klauspost/compress v1.10.2
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/gzip
 # github.com/kr/pretty v0.1.0
@@ -467,7 +467,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw
 go.mongodb.org/mongo-driver/bson/bsontype
 go.mongodb.org/mongo-driver/bson/primitive
 go.mongodb.org/mongo-driver/x/bsonx/bsoncore
-# golang.org/x/crypto v0.0.0-20200219234226-1ad67e1f0ef4
+# golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
 golang.org/x/crypto/acme
 golang.org/x/crypto/acme/autocert
 golang.org/x/crypto/argon2
@@ -570,7 +570,7 @@ gopkg.in/alexcesaro/quotedprintable.v3
 gopkg.in/asn1-ber.v1
 # gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
 gopkg.in/gomail.v2
-# gopkg.in/ini.v1 v1.51.1
+# gopkg.in/ini.v1 v1.52.0
 gopkg.in/ini.v1
 # gopkg.in/ldap.v3 v3.0.2
 gopkg.in/ldap.v3
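As with the go.mod and go.sum hunks earlier in this commit, vendor/modules.txt is generated rather than hand-edited: after bumping module versions one would typically rerun go mod tidy followed by go mod vendor, which rewrites this manifest together with the vendored sources so the two stay consistent (the exact workflow used for this commit is an assumption).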