brotli without cgo
Some checks failed
continuous-integration/drone/push Build is failing

Sebastian Frank 2022-02-28 10:44:20 +01:00
parent a781485c0a
commit 5cdc4203b7
Signed by: apairon
GPG Key ID: A0E05A8199CE3F57
136 changed files with 256664 additions and 27607 deletions

go.mod (2 lines changed)

@ -5,6 +5,7 @@ go 1.16
require (
github.com/Depado/bfchroma v1.3.0
github.com/alecthomas/chroma v0.10.0 // indirect
github.com/andybalholm/brotli v1.0.4
github.com/davecgh/go-spew v1.1.1
github.com/ddliu/motto v0.3.1
github.com/disintegration/imaging v1.6.2
@ -14,7 +15,6 @@ require (
github.com/gosuri/uilive v0.0.4 // indirect
github.com/gosuri/uiprogress v0.0.1
github.com/imdario/mergo v0.3.12
github.com/itchio/go-brotli v0.0.0-20190702114328-3f28d645a45c
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-tty v0.0.4
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7

go.sum (15 lines changed)

@ -1,20 +1,17 @@
github.com/Depado/bfchroma v1.3.0 h1:zz14vpvySU6S0CL6yGPr1vkFevQecIt8dJdCsMS2JpM=
github.com/Depado/bfchroma v1.3.0/go.mod h1:c0bFk0tFmT+clD3TIGurjWCfD/QV8/EebfM3JGr+98M=
github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U=
github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI=
github.com/alecthomas/chroma v0.7.3 h1:NfdAERMy+esYQs8OXk0I868/qDxxCEo7FMz1WIqMAeI=
github.com/alecthomas/chroma v0.7.3/go.mod h1:sko8vR34/90zvl5QdcUdvzL3J8NKjAUx9va9jPuFNoM=
github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek=
github.com/alecthomas/chroma v0.10.0/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s=
github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721 h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo=
github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0=
github.com/alecthomas/kong v0.2.4/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE=
github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
github.com/alecthomas/repr v0.0.0-20200325044227-4184120f674c h1:MVVbswUlqicyj8P/JljoocA7AyCo62gzD0O7jfvrhtE=
github.com/alecthomas/repr v0.0.0-20200325044227-4184120f674c/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=
github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@ -23,7 +20,6 @@ github.com/ddliu/motto v0.3.1 h1:k2uMOMy/LGA1okqJhtuq0ajHhYEIr798qlBULt+1kWs=
github.com/ddliu/motto v0.3.1/go.mod h1:jhu/Dn9mRcDsZNeb2rCLApdM8OoTFV77Ti2DVQx1ltE=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
@ -47,8 +43,6 @@ github.com/gosuri/uiprogress v0.0.1 h1:0kpv/XY/qTmFWl/SkaJykZXrBBzwwadmW8fRb7RJS
github.com/gosuri/uiprogress v0.0.1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/itchio/go-brotli v0.0.0-20190702114328-3f28d645a45c h1:Jf20xV/yR/O6eSUqLTuXhka/+54YR59sGwN7b3MkxYk=
github.com/itchio/go-brotli v0.0.0-20190702114328-3f28d645a45c/go.mod h1:oRXh43p/JW9kWosasd+2kHfDpb1ec4m7YrZ5E39s1iI=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -59,7 +53,6 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
@ -86,7 +79,6 @@ github.com/robertkrimen/otto v0.0.0-20211024170158-b87d35c0b86f/go.mod h1:/mK7FZ
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
@ -100,7 +92,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410 h1:hTftEOvwiOq2+O8k2D5/Q7COC7k5Qcrgc2TFURJYnvQ=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
@ -111,13 +102,11 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 h1:nhht2DYV/Sn3qOayu8lM+cU1ii9sTLUeBQwQQfUHtrs=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=


@ -1,5 +1,3 @@
// +build cgo
package mark2web
import (
@ -7,7 +5,7 @@ import (
"os"
"gitbase.de/apairon/mark2web/pkg/logger"
"github.com/itchio/go-brotli/enc"
"github.com/andybalholm/brotli"
)
var brotliSupported = true
@ -22,7 +20,7 @@ func handleBrotliCompression(filename string, content []byte) {
defer f.Close()
bw := enc.NewBrotliWriter(f, nil)
bw := brotli.NewWriterLevel(f, brotli.BestCompression)
defer bw.Close()
if content != nil {
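The hunks above replace the cgo-backed itchio/go-brotli encoder (enc.NewBrotliWriter) with the pure-Go andybalholm/brotli writer. A minimal, self-contained sketch of the new call pattern follows; the output path and payload are hypothetical and only illustrate how brotli.NewWriterLevel wraps an os.File:

package main

import (
	"log"
	"os"

	"github.com/andybalholm/brotli"
)

func main() {
	// Hypothetical output path and payload, for illustration only.
	f, err := os.Create("index.html.br")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewWriterLevel wraps any io.Writer; BestCompression trades speed for output size.
	bw := brotli.NewWriterLevel(f, brotli.BestCompression)
	defer bw.Close()

	if _, err := bw.Write([]byte("<html>hello</html>")); err != nil {
		log.Fatal(err)
	}
}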


@ -42,7 +42,6 @@ func (node *TreeNode) ReadContentDir(inBase string, outBase string, dir string,
node.InputFiles = make([]string, 0)
}
node.InputFiles = append(node.InputFiles, f.Name())
break
default:
logger.D("FIL %s", p)
if node.OtherFiles == nil {
@ -73,7 +72,7 @@ func (node *TreeNode) processMarkdownWithHeader(md []byte, errorRef string) (*Pa
newConfig := new(PathConfig)
headerRegex := regexp.MustCompile("(?s)^---(.*?)\\r?\\n\\r?---\\r?\\n\\r?")
headerRegex := regexp.MustCompile(`(?s)^---(.*?)\r?\n\r?---\r?\n\r?`)
yamlData := headerRegex.Find(md)
if string(yamlData) != "" {
// replace tabs
@ -96,7 +95,7 @@ func (node *TreeNode) processMarkdownWithHeader(md []byte, errorRef string) (*Pa
}
// use --- for splitting document in markdown parts
regex := regexp.MustCompile("\\r?\\n\\r?---\\r?\\n\\r?")
regex := regexp.MustCompile(`\r?\n\r?---\r?\n\r?`)
inputParts := regex.Split(string(md), -1)
htmlParts := make([]*pongo2.Value, 0)


@ -1,14 +0,0 @@
// +build !cgo
package mark2web
import "gitbase.de/apairon/mark2web/pkg/helper"
var brotliSupported = false
func init() {
helper.Log.Warning("cgo is disabled, so brotli compression is not supported")
}
func handleBrotliCompression(filename string, content []byte) {
}


@ -1,4 +1,4 @@
Copyright (c) 2009, 2010, 2013-2015 by the Brotli Authors.
Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

vendor/github.com/andybalholm/brotli/README.md (new file, generated, vendored; 7 lines added)

@ -0,0 +1,7 @@
This package is a brotli compressor and decompressor implemented in Go.
It was translated from the reference implementation (https://github.com/google/brotli)
with the `c2go` tool at https://github.com/andybalholm/c2go.
I am using it in production with https://github.com/andybalholm/redwood.
API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc.
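For orientation, a minimal compress/decompress round trip with this package's top-level API; this is a sketch assuming the standard NewWriter/NewReader entry points, and the buffer contents are illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/andybalholm/brotli"
)

func main() {
	var compressed bytes.Buffer

	// Compress: the brotli Writer wraps any io.Writer.
	w := brotli.NewWriter(&compressed)
	if _, err := w.Write([]byte("hello, brotli")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress: the brotli Reader wraps any io.Reader.
	r := brotli.NewReader(&compressed)
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // hello, brotli
}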


@ -0,0 +1,185 @@
package brotli
import (
"sync"
)
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function to find backward reference copies. */
func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint {
if distance <= max_distance {
var distance_plus_3 uint = distance + 3
var offset0 uint = distance_plus_3 - uint(dist_cache[0])
var offset1 uint = distance_plus_3 - uint(dist_cache[1])
if distance == uint(dist_cache[0]) {
return 0
} else if distance == uint(dist_cache[1]) {
return 1
} else if offset0 < 7 {
return (0x9750468 >> (4 * offset0)) & 0xF
} else if offset1 < 7 {
return (0xFDB1ACE >> (4 * offset1)) & 0xF
} else if distance == uint(dist_cache[2]) {
return 2
} else if distance == uint(dist_cache[3]) {
return 3
}
}
return distance + numDistanceShortCodes - 1
}
var hasherSearchResultPool sync.Pool
func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var insert_length uint = *last_insert_len
var pos_end uint = position + num_bytes
var store_end uint
if num_bytes >= hasher.StoreLookahead() {
store_end = position + num_bytes - hasher.StoreLookahead() + 1
} else {
store_end = position
}
var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params)
var apply_random_heuristics uint = position + random_heuristics_window_size
var gap uint = 0
/* Set maximum distance, see section 9.1. of the spec. */
const kMinScore uint = scoreBase + 100
/* For speed up heuristics for random data. */
/* Minimum score to accept a backward reference. */
hasher.PrepareDistanceCache(dist_cache)
sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
if sr2 == nil {
sr2 = &hasherSearchResult{}
}
sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
if sr == nil {
sr = &hasherSearchResult{}
}
for position+hasher.HashTypeLength() < pos_end {
var max_length uint = pos_end - position
var max_distance uint = brotli_min_size_t(position, max_backward_limit)
sr.len = 0
sr.len_code_delta = 0
sr.distance = 0
sr.score = kMinScore
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr)
if sr.score > kMinScore {
/* Found a match. Let's look for something even better ahead. */
var delayed_backward_references_in_row int = 0
max_length--
for ; ; max_length-- {
var cost_diff_lazy uint = 175
if params.quality < minQualityForExtensiveReferenceSearch {
sr2.len = brotli_min_size_t(sr.len-1, max_length)
} else {
sr2.len = 0
}
sr2.len_code_delta = 0
sr2.distance = 0
sr2.score = kMinScore
max_distance = brotli_min_size_t(position+1, max_backward_limit)
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2)
if sr2.score >= sr.score+cost_diff_lazy {
/* Ok, let's just write one byte for now and start a match from the
next byte. */
position++
insert_length++
*sr = *sr2
delayed_backward_references_in_row++
if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
continue
}
}
break
}
apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size
max_distance = brotli_min_size_t(position, max_backward_limit)
{
/* The first 16 codes are special short-codes,
and the minimum offset is 1. */
var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache)
if (sr.distance <= (max_distance + gap)) && distance_code > 0 {
dist_cache[3] = dist_cache[2]
dist_cache[2] = dist_cache[1]
dist_cache[1] = dist_cache[0]
dist_cache[0] = int(sr.distance)
hasher.PrepareDistanceCache(dist_cache)
}
*commands = append(*commands, makeCommand(&params.dist, insert_length, sr.len, sr.len_code_delta, distance_code))
}
*num_literals += insert_length
insert_length = 0
/* Put the hash keys into the table, if there are enough bytes left.
Depending on the hasher implementation, it can push all positions
in the given range or only a subset of them.
Avoid hash poisoning with RLE data. */
{
var range_start uint = position + 2
var range_end uint = brotli_min_size_t(position+sr.len, store_end)
if sr.distance < sr.len>>2 {
range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2)))
}
hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end)
}
position += sr.len
} else {
insert_length++
position++
/* If we have not seen matches for a long time, we can skip some
match lookups. Unsuccessful match lookups are very very expensive
and this kind of a heuristic speeds up compression quite
a lot. */
if position > apply_random_heuristics {
/* Going through uncompressible data, jump. */
if position > apply_random_heuristics+4*random_heuristics_window_size {
var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4)
/* It is quite a long time since we saw a copy, so we assume
that this data is not compressible, and store hashes less
often. Hashes of non compressible data are less likely to
turn out to be useful in the future, too, so we store less of
them to not to flood out the hash table of good compressible
data. */
var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin)
for ; position < pos_jump; position += 4 {
hasher.Store(ringbuffer, ringbuffer_mask, position)
insert_length += 4
}
} else {
var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2)
var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin)
for ; position < pos_jump; position += 2 {
hasher.Store(ringbuffer, ringbuffer_mask, position)
insert_length += 2
}
}
}
}
}
insert_length += pos_end - position
*last_insert_len = insert_length
hasherSearchResultPool.Put(sr)
hasherSearchResultPool.Put(sr2)
}


@ -0,0 +1,796 @@
package brotli
import "math"
type zopfliNode struct {
length uint32
distance uint32
dcode_insert_length uint32
u struct {
cost float32
next uint32
shortcut uint32
}
}
const maxEffectiveDistanceAlphabetSize = 544
const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */
var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}
var kDistanceCacheOffset = []int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3}
func initZopfliNodes(array []zopfliNode, length uint) {
var stub zopfliNode
var i uint
stub.length = 1
stub.distance = 0
stub.dcode_insert_length = 0
stub.u.cost = kInfinity
for i = 0; i < length; i++ {
array[i] = stub
}
}
func zopfliNodeCopyLength(self *zopfliNode) uint32 {
return self.length & 0x1FFFFFF
}
func zopfliNodeLengthCode(self *zopfliNode) uint32 {
var modifier uint32 = self.length >> 25
return zopfliNodeCopyLength(self) + 9 - modifier
}
func zopfliNodeCopyDistance(self *zopfliNode) uint32 {
return self.distance
}
func zopfliNodeDistanceCode(self *zopfliNode) uint32 {
var short_code uint32 = self.dcode_insert_length >> 27
if short_code == 0 {
return zopfliNodeCopyDistance(self) + numDistanceShortCodes - 1
} else {
return short_code - 1
}
}
func zopfliNodeCommandLength(self *zopfliNode) uint32 {
return zopfliNodeCopyLength(self) + (self.dcode_insert_length & 0x7FFFFFF)
}
/* Histogram based cost model for zopflification. */
type zopfliCostModel struct {
cost_cmd_ [numCommandSymbols]float32
cost_dist_ []float32
distance_histogram_size uint32
literal_costs_ []float32
min_cost_cmd_ float32
num_bytes_ uint
}
func initZopfliCostModel(self *zopfliCostModel, dist *distanceParams, num_bytes uint) {
var distance_histogram_size uint32 = dist.alphabet_size
if distance_histogram_size > maxEffectiveDistanceAlphabetSize {
distance_histogram_size = maxEffectiveDistanceAlphabetSize
}
self.num_bytes_ = num_bytes
self.literal_costs_ = make([]float32, (num_bytes + 2))
self.cost_dist_ = make([]float32, (dist.alphabet_size))
self.distance_histogram_size = distance_histogram_size
}
func cleanupZopfliCostModel(self *zopfliCostModel) {
self.literal_costs_ = nil
self.cost_dist_ = nil
}
func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, cost []float32) {
var sum uint = 0
var missing_symbol_sum uint
var log2sum float32
var missing_symbol_cost float32
var i uint
for i = 0; i < histogram_size; i++ {
sum += uint(histogram[i])
}
log2sum = float32(fastLog2(sum))
missing_symbol_sum = sum
if !literal_histogram {
for i = 0; i < histogram_size; i++ {
if histogram[i] == 0 {
missing_symbol_sum++
}
}
}
missing_symbol_cost = float32(fastLog2(missing_symbol_sum)) + 2
for i = 0; i < histogram_size; i++ {
if histogram[i] == 0 {
cost[i] = missing_symbol_cost
continue
}
/* Shannon bits for this symbol. */
cost[i] = log2sum - float32(fastLog2(uint(histogram[i])))
/* Cannot be coded with less than 1 bit */
if cost[i] < 1 {
cost[i] = 1
}
}
}
func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) {
var histogram_literal [numLiteralSymbols]uint32
var histogram_cmd [numCommandSymbols]uint32
var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32
var cost_literal [numLiteralSymbols]float32
var pos uint = position - last_insert_len
var min_cost_cmd float32 = kInfinity
var cost_cmd []float32 = self.cost_cmd_[:]
var literal_costs []float32
histogram_literal = [numLiteralSymbols]uint32{}
histogram_cmd = [numCommandSymbols]uint32{}
histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{}
for i := range commands {
var inslength uint = uint(commands[i].insert_len_)
var copylength uint = uint(commandCopyLen(&commands[i]))
var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF
var cmdcode uint = uint(commands[i].cmd_prefix_)
var j uint
histogram_cmd[cmdcode]++
if cmdcode >= 128 {
histogram_dist[distcode]++
}
for j = 0; j < inslength; j++ {
histogram_literal[ringbuffer[(pos+j)&ringbuffer_mask]]++
}
pos += inslength + copylength
}
setCost(histogram_literal[:], numLiteralSymbols, true, cost_literal[:])
setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd)
setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_)
for i := 0; i < numCommandSymbols; i++ {
min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i])
}
self.min_cost_cmd_ = min_cost_cmd
{
literal_costs = self.literal_costs_
var literal_carry float32 = 0.0
num_bytes := int(self.num_bytes_)
literal_costs[0] = 0.0
for i := 0; i < num_bytes; i++ {
literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]]
literal_costs[i+1] = literal_costs[i] + literal_carry
literal_carry -= literal_costs[i+1] - literal_costs[i]
}
}
}
func zopfliCostModelSetFromLiteralCosts(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint) {
var literal_costs []float32 = self.literal_costs_
var literal_carry float32 = 0.0
var cost_dist []float32 = self.cost_dist_
var cost_cmd []float32 = self.cost_cmd_[:]
var num_bytes uint = self.num_bytes_
var i uint
estimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask, ringbuffer, literal_costs[1:])
literal_costs[0] = 0.0
for i = 0; i < num_bytes; i++ {
literal_carry += literal_costs[i+1]
literal_costs[i+1] = literal_costs[i] + literal_carry
literal_carry -= literal_costs[i+1] - literal_costs[i]
}
for i = 0; i < numCommandSymbols; i++ {
cost_cmd[i] = float32(fastLog2(uint(11 + uint32(i))))
}
for i = 0; uint32(i) < self.distance_histogram_size; i++ {
cost_dist[i] = float32(fastLog2(uint(20 + uint32(i))))
}
self.min_cost_cmd_ = float32(fastLog2(11))
}
func zopfliCostModelGetCommandCost(self *zopfliCostModel, cmdcode uint16) float32 {
return self.cost_cmd_[cmdcode]
}
func zopfliCostModelGetDistanceCost(self *zopfliCostModel, distcode uint) float32 {
return self.cost_dist_[distcode]
}
func zopfliCostModelGetLiteralCosts(self *zopfliCostModel, from uint, to uint) float32 {
return self.literal_costs_[to] - self.literal_costs_[from]
}
func zopfliCostModelGetMinCostCmd(self *zopfliCostModel) float32 {
return self.min_cost_cmd_
}
/* REQUIRES: len >= 2, start_pos <= pos */
/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */
/* Maintains the "ZopfliNode array invariant". */
func updateZopfliNode(nodes []zopfliNode, pos uint, start_pos uint, len uint, len_code uint, dist uint, short_code uint, cost float32) {
var next *zopfliNode = &nodes[pos+len]
next.length = uint32(len | (len+9-len_code)<<25)
next.distance = uint32(dist)
next.dcode_insert_length = uint32(short_code<<27 | (pos - start_pos))
next.u.cost = cost
}
type posData struct {
pos uint
distance_cache [4]int
costdiff float32
cost float32
}
/* Maintains the smallest 8 cost difference together with their positions */
type startPosQueue struct {
q_ [8]posData
idx_ uint
}
func initStartPosQueue(self *startPosQueue) {
self.idx_ = 0
}
func startPosQueueSize(self *startPosQueue) uint {
return brotli_min_size_t(self.idx_, 8)
}
func startPosQueuePush(self *startPosQueue, posdata *posData) {
var offset uint = ^(self.idx_) & 7
self.idx_++
var len uint = startPosQueueSize(self)
var i uint
var q []posData = self.q_[:]
q[offset] = *posdata
/* Restore the sorted order. In the list of |len| items at most |len - 1|
adjacent element comparisons / swaps are required. */
for i = 1; i < len; i++ {
if q[offset&7].costdiff > q[(offset+1)&7].costdiff {
var tmp posData = q[offset&7]
q[offset&7] = q[(offset+1)&7]
q[(offset+1)&7] = tmp
}
offset++
}
}
func startPosQueueAt(self *startPosQueue, k uint) *posData {
return &self.q_[(k-self.idx_)&7]
}
/* Returns the minimum possible copy length that can improve the cost of any */
/* future position. */
func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes uint, pos uint) uint {
var min_cost float32 = start_cost
var len uint = 2
var next_len_bucket uint = 4
/* Compute the minimum possible cost of reaching any future position. */
var next_len_offset uint = 10
for pos+len <= num_bytes && nodes[pos+len].u.cost <= min_cost {
/* We already reached (pos + len) with no more cost than the minimum
possible cost of reaching anything from this pos, so there is no point in
looking for lengths <= len. */
len++
if len == next_len_offset {
/* We reached the next copy length code bucket, so we add one more
extra bit to the minimum cost. */
min_cost += 1.0
next_len_offset += next_len_bucket
next_len_bucket *= 2
}
}
return uint(len)
}
/* REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 {
var clen uint = uint(zopfliNodeCopyLength(&nodes[pos]))
var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF)
var dist uint = uint(zopfliNodeCopyDistance(&nodes[pos]))
/* Since |block_start + pos| is the end position of the command, the copy part
starts from |block_start + pos - clen|. Distances that are greater than
this or greater than |max_backward_limit| + |gap| are static dictionary
references, and do not update the last distances.
Also distance code 0 (last distance) does not update the last distances. */
if pos == 0 {
return 0
} else if dist+clen <= block_start+pos+gap && dist <= max_backward_limit+gap && zopfliNodeDistanceCode(&nodes[pos]) > 0 {
return uint32(pos)
} else {
return nodes[pos-clen-ilen].u.shortcut
}
}
/* Fills in dist_cache[0..3] with the last four distances (as defined by
Section 4. of the Spec) that would be used at (block_start + pos) if we
used the shortest path of commands from block_start, computed from
nodes[0..pos]. The last four distances at block_start are in
starting_dist_cache[0..3].
REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) {
var idx int = 0
var p uint = uint(nodes[pos].u.shortcut)
for idx < 4 && p > 0 {
var ilen uint = uint(nodes[p].dcode_insert_length & 0x7FFFFFF)
var clen uint = uint(zopfliNodeCopyLength(&nodes[p]))
var dist uint = uint(zopfliNodeCopyDistance(&nodes[p]))
dist_cache[idx] = int(dist)
idx++
/* Because of prerequisite, p >= clen + ilen >= 2. */
p = uint(nodes[p-clen-ilen].u.shortcut)
}
for ; idx < 4; idx++ {
dist_cache[idx] = starting_dist_cache[0]
starting_dist_cache = starting_dist_cache[1:]
}
}
/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
is eligible. */
func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) {
/* Save cost, because ComputeDistanceCache invalidates it. */
var node_cost float32 = nodes[pos].u.cost
nodes[pos].u.shortcut = computeDistanceShortcut(block_start, pos, max_backward_limit, gap, nodes)
if node_cost <= zopfliCostModelGetLiteralCosts(model, 0, pos) {
var posdata posData
posdata.pos = pos
posdata.cost = node_cost
posdata.costdiff = node_cost - zopfliCostModelGetLiteralCosts(model, 0, pos)
computeDistanceCache(pos, starting_dist_cache, nodes, posdata.distance_cache[:])
startPosQueuePush(queue, &posdata)
}
}
/* Returns longest copy length. */
func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, max_backward_limit uint, starting_dist_cache []int, num_matches uint, matches []backwardMatch, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) uint {
var cur_ix uint = block_start + pos
var cur_ix_masked uint = cur_ix & ringbuffer_mask
var max_distance uint = brotli_min_size_t(cur_ix, max_backward_limit)
var max_len uint = num_bytes - pos
var max_zopfli_len uint = maxZopfliLen(params)
var max_iters uint = maxZopfliCandidates(params)
var min_len uint
var result uint = 0
var k uint
var gap uint = 0
evaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache, model, queue, nodes)
{
var posdata *posData = startPosQueueAt(queue, 0)
var min_cost float32 = (posdata.cost + zopfliCostModelGetMinCostCmd(model) + zopfliCostModelGetLiteralCosts(model, posdata.pos, pos))
min_len = computeMinimumCopyLength(min_cost, nodes, num_bytes, pos)
}
/* Go over the command starting positions in order of increasing cost
difference. */
for k = 0; k < max_iters && k < startPosQueueSize(queue); k++ {
var posdata *posData = startPosQueueAt(queue, k)
var start uint = posdata.pos
var inscode uint16 = getInsertLengthCode(pos - start)
var start_costdiff float32 = posdata.costdiff
var base_cost float32 = start_costdiff + float32(getInsertExtra(inscode)) + zopfliCostModelGetLiteralCosts(model, 0, pos)
var best_len uint = min_len - 1
var j uint = 0
/* Look for last distance matches using the distance cache from this
starting position. */
for ; j < numDistanceShortCodes && best_len < max_len; j++ {
var idx uint = uint(kDistanceCacheIndex[j])
var backward uint = uint(posdata.distance_cache[idx] + kDistanceCacheOffset[j])
var prev_ix uint = cur_ix - backward
var len uint = 0
var continuation byte = ringbuffer[cur_ix_masked+best_len]
if cur_ix_masked+best_len > ringbuffer_mask {
break
}
if backward > max_distance+gap {
/* Word dictionary -> ignore. */
continue
}
if backward <= max_distance {
/* Regular backward reference. */
if prev_ix >= cur_ix {
continue
}
prev_ix &= ringbuffer_mask
if prev_ix+best_len > ringbuffer_mask || continuation != ringbuffer[prev_ix+best_len] {
continue
}
len = findMatchLengthWithLimit(ringbuffer[prev_ix:], ringbuffer[cur_ix_masked:], max_len)
} else {
continue
}
{
var dist_cost float32 = base_cost + zopfliCostModelGetDistanceCost(model, j)
var l uint
for l = best_len + 1; l <= len; l++ {
var copycode uint16 = getCopyLengthCode(l)
var cmdcode uint16 = combineLengthCodes(inscode, copycode, j == 0)
var tmp float32
if cmdcode < 128 {
tmp = base_cost
} else {
tmp = dist_cost
}
var cost float32 = tmp + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
if cost < nodes[pos+l].u.cost {
updateZopfliNode(nodes, pos, start, l, l, backward, j+1, cost)
result = brotli_max_size_t(result, l)
}
best_len = l
}
}
}
/* At higher iterations look only for new last distance matches, since
looking only for new command start positions with the same distances
does not help much. */
if k >= 2 {
continue
}
{
/* Loop through all possible copy lengths at this position. */
var len uint = min_len
for j = 0; j < num_matches; j++ {
var match backwardMatch = matches[j]
var dist uint = uint(match.distance)
var is_dictionary_match bool = (dist > max_distance+gap)
var dist_code uint = dist + numDistanceShortCodes - 1
var dist_symbol uint16
var distextra uint32
var distnumextra uint32
var dist_cost float32
var max_match_len uint
/* We already tried all possible last distance matches, so we can use
normal distance code here. */
prefixEncodeCopyDistance(dist_code, uint(params.dist.num_direct_distance_codes), uint(params.dist.distance_postfix_bits), &dist_symbol, &distextra)
distnumextra = uint32(dist_symbol) >> 10
dist_cost = base_cost + float32(distnumextra) + zopfliCostModelGetDistanceCost(model, uint(dist_symbol)&0x3FF)
/* Try all copy lengths up until the maximum copy length corresponding
to this distance. If the distance refers to the static dictionary, or
the maximum length is long enough, try only one maximum length. */
max_match_len = backwardMatchLength(&match)
if len < max_match_len && (is_dictionary_match || max_match_len > max_zopfli_len) {
len = max_match_len
}
for ; len <= max_match_len; len++ {
var len_code uint
if is_dictionary_match {
len_code = backwardMatchLengthCode(&match)
} else {
len_code = len
}
var copycode uint16 = getCopyLengthCode(len_code)
var cmdcode uint16 = combineLengthCodes(inscode, copycode, false)
var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
if cost < nodes[pos+len].u.cost {
updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost)
if len > result {
result = len
}
}
}
}
}
}
return result
}
func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint {
var index uint = num_bytes
var num_commands uint = 0
for nodes[index].dcode_insert_length&0x7FFFFFF == 0 && nodes[index].length == 1 {
index--
}
nodes[index].u.next = math.MaxUint32
for index != 0 {
var len uint = uint(zopfliNodeCommandLength(&nodes[index]))
index -= uint(len)
nodes[index].u.next = uint32(len)
num_commands++
}
return num_commands
}
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var pos uint = 0
var offset uint32 = nodes[0].u.next
var i uint
var gap uint = 0
for i = 0; offset != math.MaxUint32; i++ {
var next *zopfliNode = &nodes[uint32(pos)+offset]
var copy_length uint = uint(zopfliNodeCopyLength(next))
var insert_length uint = uint(next.dcode_insert_length & 0x7FFFFFF)
pos += insert_length
offset = next.u.next
if i == 0 {
insert_length += *last_insert_len
*last_insert_len = 0
}
{
var distance uint = uint(zopfliNodeCopyDistance(next))
var len_code uint = uint(zopfliNodeLengthCode(next))
var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit)
var is_dictionary bool = (distance > max_distance+gap)
var dist_code uint = uint(zopfliNodeDistanceCode(next))
*commands = append(*commands, makeCommand(&params.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code))
if !is_dictionary && dist_code > 0 {
dist_cache[3] = dist_cache[2]
dist_cache[2] = dist_cache[1]
dist_cache[1] = dist_cache[0]
dist_cache[0] = int(distance)
}
}
*num_literals += insert_length
pos += copy_length
}
*last_insert_len += num_bytes - pos
}
func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, gap uint, dist_cache []int, model *zopfliCostModel, num_matches []uint32, matches []backwardMatch, nodes []zopfliNode) uint {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var max_zopfli_len uint = maxZopfliLen(params)
var queue startPosQueue
var cur_match_pos uint = 0
var i uint
nodes[0].length = 0
nodes[0].u.cost = 0
initStartPosQueue(&queue)
for i = 0; i+3 < num_bytes; i++ {
var skip uint = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, uint(num_matches[i]), matches[cur_match_pos:], model, &queue, nodes)
if skip < longCopyQuickStep {
skip = 0
}
cur_match_pos += uint(num_matches[i])
if num_matches[i] == 1 && backwardMatchLength(&matches[cur_match_pos-1]) > max_zopfli_len {
skip = brotli_max_size_t(backwardMatchLength(&matches[cur_match_pos-1]), skip)
}
if skip > 1 {
skip--
for skip != 0 {
i++
if i+3 >= num_bytes {
break
}
evaluateNode(position, i, max_backward_limit, gap, dist_cache, model, &queue, nodes)
cur_match_pos += uint(num_matches[i])
skip--
}
}
}
return computeShortestPathFromNodes(num_bytes, nodes)
}
/* Computes the shortest path of commands from position to at most
position + num_bytes.
On return, path->size() is the number of commands found and path[i] is the
length of the i-th command (copy length plus insert length).
Note that the sum of the lengths of all commands can be less than num_bytes.
On return, the nodes[0..num_bytes] array will have the following
"ZopfliNode array invariant":
For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then
(1) nodes[i].copy_length() >= 2
(2) nodes[i].command_length() <= i and
(3) nodes[i - nodes[i].command_length()].cost < kInfinity
REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */
func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var max_zopfli_len uint = maxZopfliLen(params)
var model zopfliCostModel
var queue startPosQueue
var matches [2 * (maxNumMatchesH10 + 64)]backwardMatch
var store_end uint
if num_bytes >= hasher.StoreLookahead() {
store_end = position + num_bytes - hasher.StoreLookahead() + 1
} else {
store_end = position
}
var i uint
var gap uint = 0
var lz_matches_offset uint = 0
nodes[0].length = 0
nodes[0].u.cost = 0
initZopfliCostModel(&model, &params.dist, num_bytes)
zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask)
initStartPosQueue(&queue)
for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ {
var pos uint = position + i
var max_distance uint = brotli_min_size_t(pos, max_backward_limit)
var skip uint
var num_matches uint
num_matches = findAllMatchesH10(hasher, &params.dictionary, ringbuffer, ringbuffer_mask, pos, num_bytes-i, max_distance, gap, params, matches[lz_matches_offset:])
if num_matches > 0 && backwardMatchLength(&matches[num_matches-1]) > max_zopfli_len {
matches[0] = matches[num_matches-1]
num_matches = 1
}
skip = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, num_matches, matches[:], &model, &queue, nodes)
if skip < longCopyQuickStep {
skip = 0
}
if num_matches == 1 && backwardMatchLength(&matches[0]) > max_zopfli_len {
skip = brotli_max_size_t(backwardMatchLength(&matches[0]), skip)
}
if skip > 1 {
/* Add the tail of the copy to the hasher. */
hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+skip, store_end))
skip--
for skip != 0 {
i++
if i+hasher.HashTypeLength()-1 >= num_bytes {
break
}
evaluateNode(position, i, max_backward_limit, gap, dist_cache, &model, &queue, nodes)
skip--
}
}
}
cleanupZopfliCostModel(&model)
return computeShortestPathFromNodes(num_bytes, nodes)
}
func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
var nodes []zopfliNode
nodes = make([]zopfliNode, (num_bytes + 1))
initZopfliNodes(nodes, num_bytes+1)
zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes)
zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
nodes = nil
}
func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var num_matches []uint32 = make([]uint32, num_bytes)
var matches_size uint = 4 * num_bytes
var store_end uint
if num_bytes >= hasher.StoreLookahead() {
store_end = position + num_bytes - hasher.StoreLookahead() + 1
} else {
store_end = position
}
var cur_match_pos uint = 0
var i uint
var orig_num_literals uint
var orig_last_insert_len uint
var orig_dist_cache [4]int
var orig_num_commands int
var model zopfliCostModel
var nodes []zopfliNode
var matches []backwardMatch = make([]backwardMatch, matches_size)
var gap uint = 0
var shadow_matches uint = 0
var new_array []backwardMatch
for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ {
var pos uint = position + i
var max_distance uint = brotli_min_size_t(pos, max_backward_limit)
var max_length uint = num_bytes - i
var num_found_matches uint
var cur_match_end uint
var j uint
/* Ensure that we have enough free slots. */
if matches_size < cur_match_pos+maxNumMatchesH10+shadow_matches {
var new_size uint = matches_size
if new_size == 0 {
new_size = cur_match_pos + maxNumMatchesH10 + shadow_matches
}
for new_size < cur_match_pos+maxNumMatchesH10+shadow_matches {
new_size *= 2
}
new_array = make([]backwardMatch, new_size)
if matches_size != 0 {
copy(new_array, matches[:matches_size])
}
matches = new_array
matches_size = new_size
}
num_found_matches = findAllMatchesH10(hasher.(*h10), &params.dictionary, ringbuffer, ringbuffer_mask, pos, max_length, max_distance, gap, params, matches[cur_match_pos+shadow_matches:])
cur_match_end = cur_match_pos + num_found_matches
for j = cur_match_pos; j+1 < cur_match_end; j++ {
assert(backwardMatchLength(&matches[j]) <= backwardMatchLength(&matches[j+1]))
}
num_matches[i] = uint32(num_found_matches)
if num_found_matches > 0 {
var match_len uint = backwardMatchLength(&matches[cur_match_end-1])
if match_len > maxZopfliLenQuality11 {
var skip uint = match_len - 1
matches[cur_match_pos] = matches[cur_match_end-1]
cur_match_pos++
num_matches[i] = 1
/* Add the tail of the copy to the hasher. */
hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+match_len, store_end))
var pos uint = i
for i := 0; i < int(skip); i++ {
num_matches[pos+1:][i] = 0
}
i += skip
} else {
cur_match_pos = cur_match_end
}
}
}
orig_num_literals = *num_literals
orig_last_insert_len = *last_insert_len
copy(orig_dist_cache[:], dist_cache[:4])
orig_num_commands = len(*commands)
nodes = make([]zopfliNode, (num_bytes + 1))
initZopfliCostModel(&model, &params.dist, num_bytes)
for i = 0; i < 2; i++ {
initZopfliNodes(nodes, num_bytes+1)
if i == 0 {
zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask)
} else {
zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len)
}
*commands = (*commands)[:orig_num_commands]
*num_literals = orig_num_literals
*last_insert_len = orig_last_insert_len
copy(dist_cache, orig_dist_cache[:4])
zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes)
zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
}
cleanupZopfliCostModel(&model)
nodes = nil
matches = nil
num_matches = nil
}

vendor/github.com/andybalholm/brotli/bit_cost.go (new file, generated, vendored; 436 lines added)

@ -0,0 +1,436 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions to estimate the bit cost of Huffman trees. */
func shannonEntropy(population []uint32, size uint, total *uint) float64 {
var sum uint = 0
var retval float64 = 0
var population_end []uint32 = population[size:]
var p uint
for -cap(population) < -cap(population_end) {
p = uint(population[0])
population = population[1:]
sum += p
retval -= float64(p) * fastLog2(p)
}
if sum != 0 {
retval += float64(sum) * fastLog2(sum)
}
*total = sum
return retval
}
func bitsEntropy(population []uint32, size uint) float64 {
var sum uint
var retval float64 = shannonEntropy(population, size, &sum)
if retval < float64(sum) {
/* At least one bit per literal is needed. */
retval = float64(sum)
}
return retval
}
const kOneSymbolHistogramCost float64 = 12
const kTwoSymbolHistogramCost float64 = 20
const kThreeSymbolHistogramCost float64 = 28
const kFourSymbolHistogramCost float64 = 37
func populationCostLiteral(histogram *histogramLiteral) float64 {
var data_size uint = histogramDataSizeLiteral()
var count int = 0
var s [5]uint
var bits float64 = 0.0
var i uint
if histogram.total_count_ == 0 {
return kOneSymbolHistogramCost
}
for i = 0; i < data_size; i++ {
if histogram.data_[i] > 0 {
s[count] = i
count++
if count > 4 {
break
}
}
}
if count == 1 {
return kOneSymbolHistogramCost
}
if count == 2 {
return kTwoSymbolHistogramCost + float64(histogram.total_count_)
}
if count == 3 {
var histo0 uint32 = histogram.data_[s[0]]
var histo1 uint32 = histogram.data_[s[1]]
var histo2 uint32 = histogram.data_[s[2]]
var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
}
if count == 4 {
var histo [4]uint32
var h23 uint32
var histomax uint32
for i = 0; i < 4; i++ {
histo[i] = histogram.data_[s[i]]
}
/* Sort */
for i = 0; i < 4; i++ {
var j uint
for j = i + 1; j < 4; j++ {
if histo[j] > histo[i] {
var tmp uint32 = histo[j]
histo[j] = histo[i]
histo[i] = tmp
}
}
}
h23 = histo[2] + histo[3]
histomax = brotli_max_uint32_t(h23, histo[0])
return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
}
{
var max_depth uint = 1
var depth_histo = [codeLengthCodes]uint32{0}
/* In this loop we compute the entropy of the histogram and simultaneously
build a simplified histogram of the code length codes where we use the
zero repeat code 17, but we don't use the non-zero repeat code 16. */
var log2total float64 = fastLog2(histogram.total_count_)
for i = 0; i < data_size; {
if histogram.data_[i] > 0 {
var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
= log2(total_count) - log2(count(symbol)) */
var depth uint = uint(log2p + 0.5)
/* Approximate the bit depth by round(-log2(P(symbol))) */
bits += float64(histogram.data_[i]) * log2p
if depth > 15 {
depth = 15
}
if depth > max_depth {
max_depth = depth
}
depth_histo[depth]++
i++
} else {
var reps uint32 = 1
/* Compute the run length of zeros and add the appropriate number of 0
and 17 code length codes to the code length code histogram. */
var k uint
for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
reps++
}
i += uint(reps)
if i == data_size {
/* Don't add any cost for the last zero run, since these are encoded
only implicitly. */
break
}
if reps < 3 {
depth_histo[0] += reps
} else {
reps -= 2
for reps > 0 {
depth_histo[repeatZeroCodeLength]++
/* Add the 3 extra bits for the 17 code length code. */
bits += 3
reps >>= 3
}
}
}
}
/* Add the estimated encoding cost of the code length code histogram. */
bits += float64(18 + 2*max_depth)
/* Add the entropy of the code length code histogram. */
bits += bitsEntropy(depth_histo[:], codeLengthCodes)
}
return bits
}
func populationCostCommand(histogram *histogramCommand) float64 {
var data_size uint = histogramDataSizeCommand()
var count int = 0
var s [5]uint
var bits float64 = 0.0
var i uint
if histogram.total_count_ == 0 {
return kOneSymbolHistogramCost
}
for i = 0; i < data_size; i++ {
if histogram.data_[i] > 0 {
s[count] = i
count++
if count > 4 {
break
}
}
}
if count == 1 {
return kOneSymbolHistogramCost
}
if count == 2 {
return kTwoSymbolHistogramCost + float64(histogram.total_count_)
}
if count == 3 {
var histo0 uint32 = histogram.data_[s[0]]
var histo1 uint32 = histogram.data_[s[1]]
var histo2 uint32 = histogram.data_[s[2]]
var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
}
if count == 4 {
var histo [4]uint32
var h23 uint32
var histomax uint32
for i = 0; i < 4; i++ {
histo[i] = histogram.data_[s[i]]
}
/* Sort */
for i = 0; i < 4; i++ {
var j uint
for j = i + 1; j < 4; j++ {
if histo[j] > histo[i] {
var tmp uint32 = histo[j]
histo[j] = histo[i]
histo[i] = tmp
}
}
}
h23 = histo[2] + histo[3]
histomax = brotli_max_uint32_t(h23, histo[0])
return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
}
{
var max_depth uint = 1
var depth_histo = [codeLengthCodes]uint32{0}
/* In this loop we compute the entropy of the histogram and simultaneously
build a simplified histogram of the code length codes where we use the
zero repeat code 17, but we don't use the non-zero repeat code 16. */
var log2total float64 = fastLog2(histogram.total_count_)
for i = 0; i < data_size; {
if histogram.data_[i] > 0 {
var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
= log2(total_count) - log2(count(symbol)) */
var depth uint = uint(log2p + 0.5)
/* Approximate the bit depth by round(-log2(P(symbol))) */
bits += float64(histogram.data_[i]) * log2p
if depth > 15 {
depth = 15
}
if depth > max_depth {
max_depth = depth
}
depth_histo[depth]++
i++
} else {
var reps uint32 = 1
/* Compute the run length of zeros and add the appropriate number of 0
and 17 code length codes to the code length code histogram. */
var k uint
for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
reps++
}
i += uint(reps)
if i == data_size {
/* Don't add any cost for the last zero run, since these are encoded
only implicitly. */
break
}
if reps < 3 {
depth_histo[0] += reps
} else {
reps -= 2
for reps > 0 {
depth_histo[repeatZeroCodeLength]++
/* Add the 3 extra bits for the 17 code length code. */
bits += 3
reps >>= 3
}
}
}
}
/* Add the estimated encoding cost of the code length code histogram. */
bits += float64(18 + 2*max_depth)
/* Add the entropy of the code length code histogram. */
bits += bitsEntropy(depth_histo[:], codeLengthCodes)
}
return bits
}
func populationCostDistance(histogram *histogramDistance) float64 {
var data_size uint = histogramDataSizeDistance()
var count int = 0
var s [5]uint
var bits float64 = 0.0
var i uint
if histogram.total_count_ == 0 {
return kOneSymbolHistogramCost
}
for i = 0; i < data_size; i++ {
if histogram.data_[i] > 0 {
s[count] = i
count++
if count > 4 {
break
}
}
}
if count == 1 {
return kOneSymbolHistogramCost
}
if count == 2 {
return kTwoSymbolHistogramCost + float64(histogram.total_count_)
}
if count == 3 {
var histo0 uint32 = histogram.data_[s[0]]
var histo1 uint32 = histogram.data_[s[1]]
var histo2 uint32 = histogram.data_[s[2]]
var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
}
if count == 4 {
var histo [4]uint32
var h23 uint32
var histomax uint32
for i = 0; i < 4; i++ {
histo[i] = histogram.data_[s[i]]
}
/* Sort */
for i = 0; i < 4; i++ {
var j uint
for j = i + 1; j < 4; j++ {
if histo[j] > histo[i] {
var tmp uint32 = histo[j]
histo[j] = histo[i]
histo[i] = tmp
}
}
}
h23 = histo[2] + histo[3]
histomax = brotli_max_uint32_t(h23, histo[0])
return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
}
{
var max_depth uint = 1
var depth_histo = [codeLengthCodes]uint32{0}
/* In this loop we compute the entropy of the histogram and simultaneously
build a simplified histogram of the code length codes where we use the
zero repeat code 17, but we don't use the non-zero repeat code 16. */
var log2total float64 = fastLog2(histogram.total_count_)
for i = 0; i < data_size; {
if histogram.data_[i] > 0 {
var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
= log2(total_count) - log2(count(symbol)) */
var depth uint = uint(log2p + 0.5)
/* Approximate the bit depth by round(-log2(P(symbol))) */
bits += float64(histogram.data_[i]) * log2p
if depth > 15 {
depth = 15
}
if depth > max_depth {
max_depth = depth
}
depth_histo[depth]++
i++
} else {
var reps uint32 = 1
/* Compute the run length of zeros and add the appropriate number of 0
and 17 code length codes to the code length code histogram. */
var k uint
for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
reps++
}
i += uint(reps)
if i == data_size {
/* Don't add any cost for the last zero run, since these are encoded
only implicitly. */
break
}
if reps < 3 {
depth_histo[0] += reps
} else {
reps -= 2
for reps > 0 {
depth_histo[repeatZeroCodeLength]++
/* Add the 3 extra bits for the 17 code length code. */
bits += 3
reps >>= 3
}
}
}
}
/* Add the estimated encoding cost of the code length code histogram. */
bits += float64(18 + 2*max_depth)
/* Add the entropy of the code length code histogram. */
bits += bitsEntropy(depth_histo[:], codeLengthCodes)
}
return bits
}

vendor/github.com/andybalholm/brotli/bit_reader.go (new file, generated, vendored; 266 lines added)

@ -0,0 +1,266 @@
package brotli
import "encoding/binary"
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Bit reading helpers */
const shortFillBitWindowRead = (8 >> 1)
var kBitMask = [33]uint32{
0x00000000,
0x00000001,
0x00000003,
0x00000007,
0x0000000F,
0x0000001F,
0x0000003F,
0x0000007F,
0x000000FF,
0x000001FF,
0x000003FF,
0x000007FF,
0x00000FFF,
0x00001FFF,
0x00003FFF,
0x00007FFF,
0x0000FFFF,
0x0001FFFF,
0x0003FFFF,
0x0007FFFF,
0x000FFFFF,
0x001FFFFF,
0x003FFFFF,
0x007FFFFF,
0x00FFFFFF,
0x01FFFFFF,
0x03FFFFFF,
0x07FFFFFF,
0x0FFFFFFF,
0x1FFFFFFF,
0x3FFFFFFF,
0x7FFFFFFF,
0xFFFFFFFF,
}
func bitMask(n uint32) uint32 {
return kBitMask[n]
}
type bitReader struct {
val_ uint64
bit_pos_ uint32
input []byte
input_len uint
byte_pos uint
}
type bitReaderState struct {
val_ uint64
bit_pos_ uint32
input []byte
input_len uint
byte_pos uint
}
/* Initializes the BrotliBitReader fields. */
/* Ensures that accumulator is not empty.
May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
Returns false if data is required but there is no input available.
For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned
reading. */
func bitReaderSaveState(from *bitReader, to *bitReaderState) {
to.val_ = from.val_
to.bit_pos_ = from.bit_pos_
to.input = from.input
to.input_len = from.input_len
to.byte_pos = from.byte_pos
}
func bitReaderRestoreState(to *bitReader, from *bitReaderState) {
to.val_ = from.val_
to.bit_pos_ = from.bit_pos_
to.input = from.input
to.input_len = from.input_len
to.byte_pos = from.byte_pos
}
func getAvailableBits(br *bitReader) uint32 {
return 64 - br.bit_pos_
}
/* Returns amount of unread bytes the bit reader still has buffered from the
BrotliInput, including whole bytes in br->val_. */
func getRemainingBytes(br *bitReader) uint {
return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3))
}
/* Checks if there is at least |num| bytes left in the input ring-buffer
(excluding the bits remaining in br->val_). */
func checkInputAmount(br *bitReader, num uint) bool {
return br.input_len-br.byte_pos >= num
}
/* Guarantees that there are at least |n_bits| + 1 bits in accumulator.
Precondition: accumulator contains at least 1 bit.
|n_bits| should be in the range [1..24] for regular build. For portable
non-64-bit little-endian build only 16 bits are safe to request. */
func fillBitWindow(br *bitReader, n_bits uint32) {
if br.bit_pos_ >= 32 {
br.val_ >>= 32
br.bit_pos_ ^= 32 /* here same as -= 32 because of the if condition */
br.val_ |= (uint64(binary.LittleEndian.Uint32(br.input[br.byte_pos:]))) << 32
br.byte_pos += 4
}
}
/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no
more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. */
func fillBitWindow16(br *bitReader) {
fillBitWindow(br, 17)
}
/* Tries to pull one byte of input to accumulator.
Returns false if there is no input available. */
func pullByte(br *bitReader) bool {
if br.byte_pos == br.input_len {
return false
}
br.val_ >>= 8
br.val_ |= (uint64(br.input[br.byte_pos])) << 56
br.bit_pos_ -= 8
br.byte_pos++
return true
}
/* Returns currently available bits.
The number of valid bits could be calculated by BrotliGetAvailableBits. */
func getBitsUnmasked(br *bitReader) uint64 {
return br.val_ >> br.bit_pos_
}
/* Like BrotliGetBits, but does not mask the result.
The result contains at least 16 valid bits. */
func get16BitsUnmasked(br *bitReader) uint32 {
fillBitWindow(br, 16)
return uint32(getBitsUnmasked(br))
}
/* Returns the specified number of bits from |br| without advancing bit
position. */
func getBits(br *bitReader, n_bits uint32) uint32 {
fillBitWindow(br, n_bits)
return uint32(getBitsUnmasked(br)) & bitMask(n_bits)
}
/* Tries to peek the specified amount of bits. Returns false, if there
is not enough input. */
func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool {
for getAvailableBits(br) < n_bits {
if !pullByte(br) {
return false
}
}
*val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
return true
}
/* Advances the bit pos by |n_bits|. */
func dropBits(br *bitReader, n_bits uint32) {
br.bit_pos_ += n_bits
}
func bitReaderUnload(br *bitReader) {
var unused_bytes uint32 = getAvailableBits(br) >> 3
var unused_bits uint32 = unused_bytes << 3
br.byte_pos -= uint(unused_bytes)
if unused_bits == 64 {
br.val_ = 0
} else {
br.val_ <<= unused_bits
}
br.bit_pos_ += unused_bits
}
/* Reads the specified number of bits from |br| and advances the bit pos.
Precondition: accumulator MUST contain at least |n_bits|. */
func takeBits(br *bitReader, n_bits uint32, val *uint32) {
*val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
dropBits(br, n_bits)
}
/* Reads the specified number of bits from |br| and advances the bit pos.
Assumes that there is enough input to perform BrotliFillBitWindow. */
func readBits(br *bitReader, n_bits uint32) uint32 {
var val uint32
fillBitWindow(br, n_bits)
takeBits(br, n_bits, &val)
return val
}
/* Tries to read the specified amount of bits. Returns false, if there
is not enough input. |n_bits| MUST be positive. */
func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool {
for getAvailableBits(br) < n_bits {
if !pullByte(br) {
return false
}
}
takeBits(br, n_bits, val)
return true
}
/* Advances the bit reader position to the next byte boundary and verifies
that any skipped bits are set to zero. */
func bitReaderJumpToByteBoundary(br *bitReader) bool {
var pad_bits_count uint32 = getAvailableBits(br) & 0x7
var pad_bits uint32 = 0
if pad_bits_count != 0 {
takeBits(br, pad_bits_count, &pad_bits)
}
return pad_bits == 0
}
/* Copies remaining input bytes stored in the bit reader to the output. Value
|num| may not be larger than BrotliGetRemainingBytes. The bit reader must be
warmed up again after this. */
func copyBytes(dest []byte, br *bitReader, num uint) {
for getAvailableBits(br) >= 8 && num > 0 {
dest[0] = byte(getBitsUnmasked(br))
dropBits(br, 8)
dest = dest[1:]
num--
}
copy(dest, br.input[br.byte_pos:][:num])
br.byte_pos += num
}
/* Initializes the BrotliBitReader fields. */
func initBitReader(br *bitReader) {
br.val_ = 0
br.bit_pos_ = 64
}
/* Ensures that the accumulator is not empty.
May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
Returns false if data is required but there is no input available.
For BROTLI_ALIGNED_READ this function also prepares the bit reader for
aligned reading. */
func warmupBitReader(br *bitReader) bool {
/* Fixing alignment after an unaligned BrotliFillWindow would result in
accumulator overflow. If the misalignment was caused by BrotliSafeReadBits,
then there is enough space in the accumulator to fix the alignment. */
if getAvailableBits(br) == 0 {
if !pullByte(br) {
return false
}
}
return true
}
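/* Minimal usage sketch (illustrative only, not part of the upstream sources):

   var compressed []byte // bytes of a brotli stream, obtained elsewhere
   var br bitReader
   br.input = compressed
   br.input_len = uint(len(compressed))
   initBitReader(&br)
   if !warmupBitReader(&br) {
       // no input available
   }
   var v uint32
   if safeReadBits(&br, 3, &v) {
       // v now holds the next three bits of the stream
   }
*/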

144
vendor/github.com/andybalholm/brotli/block_splitter.go generated vendored Normal file

@ -0,0 +1,144 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Block split point selection utilities. */
type blockSplit struct {
num_types uint
num_blocks uint
types []byte
lengths []uint32
types_alloc_size uint
lengths_alloc_size uint
}
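/* After splitting, the num_blocks consecutive blocks are described by two
parallel slices: types[i] is the block-type id of block i (ids lie in
[0, num_types)) and lengths[i] is its length in symbols. The *_alloc_size
fields track the capacity used by the ensure-capacity helpers. */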
const (
kMaxLiteralHistograms uint = 100
kMaxCommandHistograms uint = 50
kLiteralBlockSwitchCost float64 = 28.1
kCommandBlockSwitchCost float64 = 13.5
kDistanceBlockSwitchCost float64 = 14.6
kLiteralStrideLength uint = 70
kCommandStrideLength uint = 40
kSymbolsPerLiteralHistogram uint = 544
kSymbolsPerCommandHistogram uint = 530
kSymbolsPerDistanceHistogram uint = 544
kMinLengthForBlockSplitting uint = 128
kIterMulForRefining uint = 2
kMinItersForRefining uint = 100
)
func countLiterals(cmds []command) uint {
var total_length uint = 0
/* Count how many we have. */
for i := range cmds {
total_length += uint(cmds[i].insert_len_)
}
return total_length
}
func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) {
var pos uint = 0
var from_pos uint = offset & mask
for i := range cmds {
var insert_len uint = uint(cmds[i].insert_len_)
if from_pos+insert_len > mask {
var head_size uint = mask + 1 - from_pos
copy(literals[pos:], data[from_pos:][:head_size])
from_pos = 0
pos += head_size
insert_len -= head_size
}
if insert_len > 0 {
copy(literals[pos:], data[from_pos:][:insert_len])
pos += insert_len
}
from_pos = uint((uint32(from_pos+insert_len) + commandCopyLen(&cmds[i])) & uint32(mask))
}
}
func myRand(seed *uint32) uint32 {
/* Initial seed should be 7. In this case, loop length is (1 << 29). */
*seed *= 16807
return *seed
}
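/* myRand is a multiplicative congruential generator (multiplier 16807) whose
state wraps around modulo 2^32; it only needs to supply cheap, deterministic
pseudo-random offsets for histogram sampling. */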
func bitCost(count uint) float64 {
if count == 0 {
return -2.0
} else {
return fastLog2(count)
}
}
const histogramsPerBatch = 64
const clustersPerBatch = 16
func initBlockSplit(self *blockSplit) {
self.num_types = 0
self.num_blocks = 0
self.types = self.types[:0]
self.lengths = self.lengths[:0]
self.types_alloc_size = 0
self.lengths_alloc_size = 0
}
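/* splitBlock derives three independent block splits from the command stream:
one over the literal bytes, one over the insert-and-copy command prefix
codes, and one over the distance prefix codes. */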
func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
{
var literals_count uint = countLiterals(cmds)
var literals []byte = make([]byte, literals_count)
/* Create a continuous array of literals. */
copyLiteralsToByteArray(cmds, data, pos, mask, literals)
/* Create the block split on the array of literals.
Literal histograms have alphabet size 256. */
splitByteVectorLiteral(literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split)
literals = nil
}
{
var insert_and_copy_codes []uint16 = make([]uint16, len(cmds))
/* Compute prefix codes for commands. */
for i := range cmds {
insert_and_copy_codes[i] = cmds[i].cmd_prefix_
}
/* Create the block split on the array of command prefixes. */
splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split)
/* TODO: reuse for distances? */
insert_and_copy_codes = nil
}
{
var distance_prefixes []uint16 = make([]uint16, len(cmds))
var j uint = 0
/* Create a continuous array of distance prefixes. */
for i := range cmds {
var cmd *command = &cmds[i]
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF
j++
}
}
/* Create the block split on the array of distance prefixes. */
splitByteVectorDistance(distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split)
distance_prefixes = nil
}
}


@ -0,0 +1,434 @@
package brotli
import "math"
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) {
var seed uint32 = 7
var block_length uint = length / num_histograms
var i uint
clearHistogramsCommand(histograms, num_histograms)
for i = 0; i < num_histograms; i++ {
var pos uint = length * i / num_histograms
if i != 0 {
pos += uint(myRand(&seed) % uint32(block_length))
}
if pos+stride >= length {
pos = length - stride - 1
}
histogramAddVectorCommand(&histograms[i], data[pos:], stride)
}
}
func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) {
var pos uint = 0
if stride >= length {
stride = length
} else {
pos = uint(myRand(seed) % uint32(length-stride+1))
}
histogramAddVectorCommand(sample, data[pos:], stride)
}
func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) {
var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
var seed uint32 = 7
var iter uint
iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
for iter = 0; iter < iters; iter++ {
var sample histogramCommand
histogramClearCommand(&sample)
randomSampleCommand(&seed, data, length, stride, &sample)
histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample)
}
}
/* Assigns a block id from the range [0, num_histograms) to each data element
in data[0..length) and fills in block_id[0..length) with the assigned values.
Returns the number of blocks, i.e. one plus the number of block switches. */
func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
var data_size uint = histogramDataSizeCommand()
var bitmaplen uint = (num_histograms + 7) >> 3
var num_blocks uint = 1
var i uint
var j uint
assert(num_histograms <= 256)
if num_histograms <= 1 {
for i = 0; i < length; i++ {
block_id[i] = 0
}
return 1
}
for i := 0; i < int(data_size*num_histograms); i++ {
insert_cost[i] = 0
}
for i = 0; i < num_histograms; i++ {
insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
}
for i = data_size; i != 0; {
i--
for j = 0; j < num_histograms; j++ {
insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
}
}
for i := 0; i < int(num_histograms); i++ {
cost[i] = 0
}
for i := 0; i < int(length*bitmaplen); i++ {
switch_signal[i] = 0
}
/* After each iteration of this loop, cost[k] will contain the difference
between the minimum cost of arriving at the current byte position using
entropy code k, and the minimum cost of arriving at the current byte
position. This difference is capped at the block switch cost, and if it
reaches block switch cost, it means that when we trace back from the last
position, we need to switch here. */
for i = 0; i < length; i++ {
var byte_ix uint = i
var ix uint = byte_ix * bitmaplen
var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
var min_cost float64 = 1e99
var block_switch_cost float64 = block_switch_bitcost
var k uint
for k = 0; k < num_histograms; k++ {
/* We are coding the symbol in data[byte_ix] with entropy code k. */
cost[k] += insert_cost[insert_cost_ix+k]
if cost[k] < min_cost {
min_cost = cost[k]
block_id[byte_ix] = byte(k)
}
}
/* More blocks for the beginning. */
if byte_ix < 2000 {
block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
}
for k = 0; k < num_histograms; k++ {
cost[k] -= min_cost
if cost[k] >= block_switch_cost {
var mask byte = byte(1 << (k & 7))
cost[k] = block_switch_cost
assert(k>>3 < bitmaplen)
switch_signal[ix+(k>>3)] |= mask
/* Trace back from the last position and switch at the marked places. */
}
}
}
{
var byte_ix uint = length - 1
var ix uint = byte_ix * bitmaplen
var cur_id byte = block_id[byte_ix]
for byte_ix > 0 {
var mask byte = byte(1 << (cur_id & 7))
assert(uint(cur_id)>>3 < bitmaplen)
byte_ix--
ix -= bitmaplen
if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
if cur_id != block_id[byte_ix] {
cur_id = block_id[byte_ix]
num_blocks++
}
}
block_id[byte_ix] = cur_id
}
}
return num_blocks
}
var remapBlockIdsCommand_kInvalidId uint16 = 256
func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
var next_id uint16 = 0
var i uint
for i = 0; i < num_histograms; i++ {
new_id[i] = remapBlockIdsCommand_kInvalidId
}
for i = 0; i < length; i++ {
assert(uint(block_ids[i]) < num_histograms)
if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId {
new_id[block_ids[i]] = next_id
next_id++
}
}
for i = 0; i < length; i++ {
block_ids[i] = byte(new_id[block_ids[i]])
assert(uint(block_ids[i]) < num_histograms)
}
assert(uint(next_id) <= num_histograms)
return uint(next_id)
}
func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) {
var i uint
clearHistogramsCommand(histograms, num_histograms)
for i = 0; i < length; i++ {
histogramAddCommand(&histograms[block_ids[i]], uint(data[i]))
}
}
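/* clusterBlocksCommand reduces the per-block histograms to a small set of
block types: histograms are first combined within batches of
histogramsPerBatch blocks, the surviving clusters are then combined globally
(capped at maxNumberOfBlockTypes), and finally every block is assigned its
cheapest cluster and the result is written into split. */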
var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32
func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
var histogram_symbols []uint32 = make([]uint32, num_blocks)
var block_lengths []uint32 = make([]uint32, num_blocks)
var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
var all_histograms_size uint = 0
var all_histograms_capacity uint = expected_num_clusters
var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity)
var cluster_size_size uint = 0
var cluster_size_capacity uint = expected_num_clusters
var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
var num_clusters uint = 0
var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch))
var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
var pairs_capacity uint = max_num_pairs + 1
var pairs []histogramPair = make([]histogramPair, pairs_capacity)
var pos uint = 0
var clusters []uint32
var num_final_clusters uint
var new_index []uint32
var i uint
var sizes = [histogramsPerBatch]uint32{0}
var new_clusters = [histogramsPerBatch]uint32{0}
var symbols = [histogramsPerBatch]uint32{0}
var remap = [histogramsPerBatch]uint32{0}
for i := 0; i < int(num_blocks); i++ {
block_lengths[i] = 0
}
{
var block_idx uint = 0
for i = 0; i < length; i++ {
assert(block_idx < num_blocks)
block_lengths[block_idx]++
if i+1 == length || block_ids[i] != block_ids[i+1] {
block_idx++
}
}
assert(block_idx == num_blocks)
}
for i = 0; i < num_blocks; i += histogramsPerBatch {
var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
var num_new_clusters uint
var j uint
for j = 0; j < num_to_combine; j++ {
var k uint
histogramClearCommand(&histograms[j])
for k = 0; uint32(k) < block_lengths[i+j]; k++ {
histogramAddCommand(&histograms[j], uint(data[pos]))
pos++
}
histograms[j].bit_cost_ = populationCostCommand(&histograms[j])
new_clusters[j] = uint32(j)
symbols[j] = uint32(j)
sizes[j] = 1
}
num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
var _new_size uint
if all_histograms_capacity == 0 {
_new_size = all_histograms_size + num_new_clusters
} else {
_new_size = all_histograms_capacity
}
var new_array []histogramCommand
for _new_size < (all_histograms_size + num_new_clusters) {
_new_size *= 2
}
new_array = make([]histogramCommand, _new_size)
if all_histograms_capacity != 0 {
copy(new_array, all_histograms[:all_histograms_capacity])
}
all_histograms = new_array
all_histograms_capacity = _new_size
}
brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
for j = 0; j < num_new_clusters; j++ {
all_histograms[all_histograms_size] = histograms[new_clusters[j]]
all_histograms_size++
cluster_size[cluster_size_size] = sizes[new_clusters[j]]
cluster_size_size++
remap[new_clusters[j]] = uint32(j)
}
for j = 0; j < num_to_combine; j++ {
histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
}
num_clusters += num_new_clusters
assert(num_clusters == cluster_size_size)
assert(num_clusters == all_histograms_size)
}
histograms = nil
max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
if pairs_capacity < max_num_pairs+1 {
pairs = nil
pairs = make([]histogramPair, (max_num_pairs + 1))
}
clusters = make([]uint32, num_clusters)
for i = 0; i < num_clusters; i++ {
clusters[i] = uint32(i)
}
num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
pairs = nil
cluster_size = nil
new_index = make([]uint32, num_clusters)
for i = 0; i < num_clusters; i++ {
new_index[i] = clusterBlocksCommand_kInvalidIndex
}
pos = 0
{
var next_index uint32 = 0
for i = 0; i < num_blocks; i++ {
var histo histogramCommand
var j uint
var best_out uint32
var best_bits float64
histogramClearCommand(&histo)
for j = 0; uint32(j) < block_lengths[i]; j++ {
histogramAddCommand(&histo, uint(data[pos]))
pos++
}
if i == 0 {
best_out = histogram_symbols[0]
} else {
best_out = histogram_symbols[i-1]
}
best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out])
for j = 0; j < num_final_clusters; j++ {
var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]])
if cur_bits < best_bits {
best_bits = cur_bits
best_out = clusters[j]
}
}
histogram_symbols[i] = best_out
if new_index[best_out] == clusterBlocksCommand_kInvalidIndex {
new_index[best_out] = next_index
next_index++
}
}
}
clusters = nil
all_histograms = nil
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
{
var cur_length uint32 = 0
var block_idx uint = 0
var max_type byte = 0
for i = 0; i < num_blocks; i++ {
cur_length += block_lengths[i]
if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
var id byte = byte(new_index[histogram_symbols[i]])
split.types[block_idx] = id
split.lengths[block_idx] = cur_length
max_type = brotli_max_uint8_t(max_type, id)
cur_length = 0
block_idx++
}
}
split.num_blocks = block_idx
split.num_types = uint(max_type) + 1
}
new_index = nil
block_lengths = nil
histogram_symbols = nil
}
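/* splitByteVectorCommand runs the full splitting pipeline on one symbol
stream: seed up to max_histograms histograms by sampling, refine them with
random samples, alternate a few findBlocks/remap/rebuild passes (3 passes,
or 10 for Zopfli-class qualities), and finally cluster the resulting blocks
into block types via clusterBlocksCommand. */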
func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
length := uint(len(data))
var data_size uint = histogramDataSizeCommand()
var num_histograms uint = length/literals_per_histogram + 1
var histograms []histogramCommand
if num_histograms > max_histograms {
num_histograms = max_histograms
}
if length == 0 {
split.num_types = 1
return
} else if length < kMinLengthForBlockSplitting {
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
split.num_types = 1
split.types[split.num_blocks] = 0
split.lengths[split.num_blocks] = uint32(length)
split.num_blocks++
return
}
histograms = make([]histogramCommand, num_histograms)
/* Find good entropy codes. */
initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms)
refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms)
{
var block_ids []byte = make([]byte, length)
var num_blocks uint = 0
var bitmaplen uint = (num_histograms + 7) >> 3
var insert_cost []float64 = make([]float64, (data_size * num_histograms))
var cost []float64 = make([]float64, num_histograms)
var switch_signal []byte = make([]byte, (length * bitmaplen))
var new_id []uint16 = make([]uint16, num_histograms)
var iters uint
if params.quality < hqZopflificationQuality {
iters = 3
} else {
iters = 10
}
/* Find a good path through literals with the good entropy codes. */
var i uint
for i = 0; i < iters; i++ {
num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms)
buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms)
}
insert_cost = nil
cost = nil
switch_signal = nil
new_id = nil
histograms = nil
clusterBlocksCommand(data, length, num_blocks, block_ids, split)
block_ids = nil
}
}


@ -0,0 +1,433 @@
package brotli
import "math"
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func initialEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) {
var seed uint32 = 7
var block_length uint = length / num_histograms
var i uint
clearHistogramsDistance(histograms, num_histograms)
for i = 0; i < num_histograms; i++ {
var pos uint = length * i / num_histograms
if i != 0 {
pos += uint(myRand(&seed) % uint32(block_length))
}
if pos+stride >= length {
pos = length - stride - 1
}
histogramAddVectorDistance(&histograms[i], data[pos:], stride)
}
}
func randomSampleDistance(seed *uint32, data []uint16, length uint, stride uint, sample *histogramDistance) {
var pos uint = 0
if stride >= length {
stride = length
} else {
pos = uint(myRand(seed) % uint32(length-stride+1))
}
histogramAddVectorDistance(sample, data[pos:], stride)
}
func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) {
var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
var seed uint32 = 7
var iter uint
iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
for iter = 0; iter < iters; iter++ {
var sample histogramDistance
histogramClearDistance(&sample)
randomSampleDistance(&seed, data, length, stride, &sample)
histogramAddHistogramDistance(&histograms[iter%num_histograms], &sample)
}
}
/* Assigns a block id from the range [0, num_histograms) to each data element
in data[0..length) and fills in block_id[0..length) with the assigned values.
Returns the number of blocks, i.e. one plus the number of block switches. */
func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
var data_size uint = histogramDataSizeDistance()
var bitmaplen uint = (num_histograms + 7) >> 3
var num_blocks uint = 1
var i uint
var j uint
assert(num_histograms <= 256)
if num_histograms <= 1 {
for i = 0; i < length; i++ {
block_id[i] = 0
}
return 1
}
for i := 0; i < int(data_size*num_histograms); i++ {
insert_cost[i] = 0
}
for i = 0; i < num_histograms; i++ {
insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
}
for i = data_size; i != 0; {
i--
for j = 0; j < num_histograms; j++ {
insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
}
}
for i := 0; i < int(num_histograms); i++ {
cost[i] = 0
}
for i := 0; i < int(length*bitmaplen); i++ {
switch_signal[i] = 0
}
/* After each iteration of this loop, cost[k] will contain the difference
between the minimum cost of arriving at the current byte position using
entropy code k, and the minimum cost of arriving at the current byte
position. This difference is capped at the block switch cost, and if it
reaches block switch cost, it means that when we trace back from the last
position, we need to switch here. */
for i = 0; i < length; i++ {
var byte_ix uint = i
var ix uint = byte_ix * bitmaplen
var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
var min_cost float64 = 1e99
var block_switch_cost float64 = block_switch_bitcost
var k uint
for k = 0; k < num_histograms; k++ {
/* We are coding the symbol in data[byte_ix] with entropy code k. */
cost[k] += insert_cost[insert_cost_ix+k]
if cost[k] < min_cost {
min_cost = cost[k]
block_id[byte_ix] = byte(k)
}
}
/* More blocks for the beginning. */
if byte_ix < 2000 {
block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
}
for k = 0; k < num_histograms; k++ {
cost[k] -= min_cost
if cost[k] >= block_switch_cost {
var mask byte = byte(1 << (k & 7))
cost[k] = block_switch_cost
assert(k>>3 < bitmaplen)
switch_signal[ix+(k>>3)] |= mask
/* Trace back from the last position and switch at the marked places. */
}
}
}
{
var byte_ix uint = length - 1
var ix uint = byte_ix * bitmaplen
var cur_id byte = block_id[byte_ix]
for byte_ix > 0 {
var mask byte = byte(1 << (cur_id & 7))
assert(uint(cur_id)>>3 < bitmaplen)
byte_ix--
ix -= bitmaplen
if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
if cur_id != block_id[byte_ix] {
cur_id = block_id[byte_ix]
num_blocks++
}
}
block_id[byte_ix] = cur_id
}
}
return num_blocks
}
var remapBlockIdsDistance_kInvalidId uint16 = 256
func remapBlockIdsDistance(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
var next_id uint16 = 0
var i uint
for i = 0; i < num_histograms; i++ {
new_id[i] = remapBlockIdsDistance_kInvalidId
}
for i = 0; i < length; i++ {
assert(uint(block_ids[i]) < num_histograms)
if new_id[block_ids[i]] == remapBlockIdsDistance_kInvalidId {
new_id[block_ids[i]] = next_id
next_id++
}
}
for i = 0; i < length; i++ {
block_ids[i] = byte(new_id[block_ids[i]])
assert(uint(block_ids[i]) < num_histograms)
}
assert(uint(next_id) <= num_histograms)
return uint(next_id)
}
func buildBlockHistogramsDistance(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramDistance) {
var i uint
clearHistogramsDistance(histograms, num_histograms)
for i = 0; i < length; i++ {
histogramAddDistance(&histograms[block_ids[i]], uint(data[i]))
}
}
var clusterBlocksDistance_kInvalidIndex uint32 = math.MaxUint32
func clusterBlocksDistance(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
var histogram_symbols []uint32 = make([]uint32, num_blocks)
var block_lengths []uint32 = make([]uint32, num_blocks)
var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
var all_histograms_size uint = 0
var all_histograms_capacity uint = expected_num_clusters
var all_histograms []histogramDistance = make([]histogramDistance, all_histograms_capacity)
var cluster_size_size uint = 0
var cluster_size_capacity uint = expected_num_clusters
var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
var num_clusters uint = 0
var histograms []histogramDistance = make([]histogramDistance, brotli_min_size_t(num_blocks, histogramsPerBatch))
var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
var pairs_capacity uint = max_num_pairs + 1
var pairs []histogramPair = make([]histogramPair, pairs_capacity)
var pos uint = 0
var clusters []uint32
var num_final_clusters uint
var new_index []uint32
var i uint
var sizes = [histogramsPerBatch]uint32{0}
var new_clusters = [histogramsPerBatch]uint32{0}
var symbols = [histogramsPerBatch]uint32{0}
var remap = [histogramsPerBatch]uint32{0}
for i := 0; i < int(num_blocks); i++ {
block_lengths[i] = 0
}
{
var block_idx uint = 0
for i = 0; i < length; i++ {
assert(block_idx < num_blocks)
block_lengths[block_idx]++
if i+1 == length || block_ids[i] != block_ids[i+1] {
block_idx++
}
}
assert(block_idx == num_blocks)
}
for i = 0; i < num_blocks; i += histogramsPerBatch {
var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
var num_new_clusters uint
var j uint
for j = 0; j < num_to_combine; j++ {
var k uint
histogramClearDistance(&histograms[j])
for k = 0; uint32(k) < block_lengths[i+j]; k++ {
histogramAddDistance(&histograms[j], uint(data[pos]))
pos++
}
histograms[j].bit_cost_ = populationCostDistance(&histograms[j])
new_clusters[j] = uint32(j)
symbols[j] = uint32(j)
sizes[j] = 1
}
num_new_clusters = histogramCombineDistance(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
var _new_size uint
if all_histograms_capacity == 0 {
_new_size = all_histograms_size + num_new_clusters
} else {
_new_size = all_histograms_capacity
}
var new_array []histogramDistance
for _new_size < (all_histograms_size + num_new_clusters) {
_new_size *= 2
}
new_array = make([]histogramDistance, _new_size)
if all_histograms_capacity != 0 {
copy(new_array, all_histograms[:all_histograms_capacity])
}
all_histograms = new_array
all_histograms_capacity = _new_size
}
brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
for j = 0; j < num_new_clusters; j++ {
all_histograms[all_histograms_size] = histograms[new_clusters[j]]
all_histograms_size++
cluster_size[cluster_size_size] = sizes[new_clusters[j]]
cluster_size_size++
remap[new_clusters[j]] = uint32(j)
}
for j = 0; j < num_to_combine; j++ {
histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
}
num_clusters += num_new_clusters
assert(num_clusters == cluster_size_size)
assert(num_clusters == all_histograms_size)
}
histograms = nil
max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
if pairs_capacity < max_num_pairs+1 {
pairs = nil
pairs = make([]histogramPair, (max_num_pairs + 1))
}
clusters = make([]uint32, num_clusters)
for i = 0; i < num_clusters; i++ {
clusters[i] = uint32(i)
}
num_final_clusters = histogramCombineDistance(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
pairs = nil
cluster_size = nil
new_index = make([]uint32, num_clusters)
for i = 0; i < num_clusters; i++ {
new_index[i] = clusterBlocksDistance_kInvalidIndex
}
pos = 0
{
var next_index uint32 = 0
for i = 0; i < num_blocks; i++ {
var histo histogramDistance
var j uint
var best_out uint32
var best_bits float64
histogramClearDistance(&histo)
for j = 0; uint32(j) < block_lengths[i]; j++ {
histogramAddDistance(&histo, uint(data[pos]))
pos++
}
if i == 0 {
best_out = histogram_symbols[0]
} else {
best_out = histogram_symbols[i-1]
}
best_bits = histogramBitCostDistanceDistance(&histo, &all_histograms[best_out])
for j = 0; j < num_final_clusters; j++ {
var cur_bits float64 = histogramBitCostDistanceDistance(&histo, &all_histograms[clusters[j]])
if cur_bits < best_bits {
best_bits = cur_bits
best_out = clusters[j]
}
}
histogram_symbols[i] = best_out
if new_index[best_out] == clusterBlocksDistance_kInvalidIndex {
new_index[best_out] = next_index
next_index++
}
}
}
clusters = nil
all_histograms = nil
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
{
var cur_length uint32 = 0
var block_idx uint = 0
var max_type byte = 0
for i = 0; i < num_blocks; i++ {
cur_length += block_lengths[i]
if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
var id byte = byte(new_index[histogram_symbols[i]])
split.types[block_idx] = id
split.lengths[block_idx] = cur_length
max_type = brotli_max_uint8_t(max_type, id)
cur_length = 0
block_idx++
}
}
split.num_blocks = block_idx
split.num_types = uint(max_type) + 1
}
new_index = nil
block_lengths = nil
histogram_symbols = nil
}
func splitByteVectorDistance(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
var data_size uint = histogramDataSizeDistance()
var num_histograms uint = length/literals_per_histogram + 1
var histograms []histogramDistance
if num_histograms > max_histograms {
num_histograms = max_histograms
}
if length == 0 {
split.num_types = 1
return
} else if length < kMinLengthForBlockSplitting {
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
split.num_types = 1
split.types[split.num_blocks] = 0
split.lengths[split.num_blocks] = uint32(length)
split.num_blocks++
return
}
histograms = make([]histogramDistance, num_histograms)
/* Find good entropy codes. */
initialEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms)
refineEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms)
{
var block_ids []byte = make([]byte, length)
var num_blocks uint = 0
var bitmaplen uint = (num_histograms + 7) >> 3
var insert_cost []float64 = make([]float64, (data_size * num_histograms))
var cost []float64 = make([]float64, num_histograms)
var switch_signal []byte = make([]byte, (length * bitmaplen))
var new_id []uint16 = make([]uint16, num_histograms)
var iters uint
if params.quality < hqZopflificationQuality {
iters = 3
} else {
iters = 10
}
/* Find a good path through literals with the good entropy codes. */
var i uint
for i = 0; i < iters; i++ {
num_blocks = findBlocksDistance(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
num_histograms = remapBlockIdsDistance(block_ids, length, new_id, num_histograms)
buildBlockHistogramsDistance(data, length, block_ids, num_histograms, histograms)
}
insert_cost = nil
cost = nil
switch_signal = nil
new_id = nil
histograms = nil
clusterBlocksDistance(data, length, num_blocks, block_ids, split)
block_ids = nil
}
}


@ -0,0 +1,433 @@
package brotli
import "math"
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func initialEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) {
var seed uint32 = 7
var block_length uint = length / num_histograms
var i uint
clearHistogramsLiteral(histograms, num_histograms)
for i = 0; i < num_histograms; i++ {
var pos uint = length * i / num_histograms
if i != 0 {
pos += uint(myRand(&seed) % uint32(block_length))
}
if pos+stride >= length {
pos = length - stride - 1
}
histogramAddVectorLiteral(&histograms[i], data[pos:], stride)
}
}
func randomSampleLiteral(seed *uint32, data []byte, length uint, stride uint, sample *histogramLiteral) {
var pos uint = 0
if stride >= length {
stride = length
} else {
pos = uint(myRand(seed) % uint32(length-stride+1))
}
histogramAddVectorLiteral(sample, data[pos:], stride)
}
func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) {
var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
var seed uint32 = 7
var iter uint
iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
for iter = 0; iter < iters; iter++ {
var sample histogramLiteral
histogramClearLiteral(&sample)
randomSampleLiteral(&seed, data, length, stride, &sample)
histogramAddHistogramLiteral(&histograms[iter%num_histograms], &sample)
}
}
/* Assigns a block id from the range [0, num_histograms) to each data element
in data[0..length) and fills in block_id[0..length) with the assigned values.
Returns the number of blocks, i.e. one plus the number of block switches. */
func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
var data_size uint = histogramDataSizeLiteral()
var bitmaplen uint = (num_histograms + 7) >> 3
var num_blocks uint = 1
var i uint
var j uint
assert(num_histograms <= 256)
if num_histograms <= 1 {
for i = 0; i < length; i++ {
block_id[i] = 0
}
return 1
}
for i := 0; i < int(data_size*num_histograms); i++ {
insert_cost[i] = 0
}
for i = 0; i < num_histograms; i++ {
insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
}
for i = data_size; i != 0; {
i--
for j = 0; j < num_histograms; j++ {
insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
}
}
for i := 0; i < int(num_histograms); i++ {
cost[i] = 0
}
for i := 0; i < int(length*bitmaplen); i++ {
switch_signal[i] = 0
}
/* After each iteration of this loop, cost[k] will contain the difference
between the minimum cost of arriving at the current byte position using
entropy code k, and the minimum cost of arriving at the current byte
position. This difference is capped at the block switch cost, and if it
reaches block switch cost, it means that when we trace back from the last
position, we need to switch here. */
for i = 0; i < length; i++ {
var byte_ix uint = i
var ix uint = byte_ix * bitmaplen
var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
var min_cost float64 = 1e99
var block_switch_cost float64 = block_switch_bitcost
var k uint
for k = 0; k < num_histograms; k++ {
/* We are coding the symbol in data[byte_ix] with entropy code k. */
cost[k] += insert_cost[insert_cost_ix+k]
if cost[k] < min_cost {
min_cost = cost[k]
block_id[byte_ix] = byte(k)
}
}
/* More blocks for the beginning. */
if byte_ix < 2000 {
block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
}
for k = 0; k < num_histograms; k++ {
cost[k] -= min_cost
if cost[k] >= block_switch_cost {
var mask byte = byte(1 << (k & 7))
cost[k] = block_switch_cost
assert(k>>3 < bitmaplen)
switch_signal[ix+(k>>3)] |= mask
/* Trace back from the last position and switch at the marked places. */
}
}
}
{
var byte_ix uint = length - 1
var ix uint = byte_ix * bitmaplen
var cur_id byte = block_id[byte_ix]
for byte_ix > 0 {
var mask byte = byte(1 << (cur_id & 7))
assert(uint(cur_id)>>3 < bitmaplen)
byte_ix--
ix -= bitmaplen
if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
if cur_id != block_id[byte_ix] {
cur_id = block_id[byte_ix]
num_blocks++
}
}
block_id[byte_ix] = cur_id
}
}
return num_blocks
}
var remapBlockIdsLiteral_kInvalidId uint16 = 256
func remapBlockIdsLiteral(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
var next_id uint16 = 0
var i uint
for i = 0; i < num_histograms; i++ {
new_id[i] = remapBlockIdsLiteral_kInvalidId
}
for i = 0; i < length; i++ {
assert(uint(block_ids[i]) < num_histograms)
if new_id[block_ids[i]] == remapBlockIdsLiteral_kInvalidId {
new_id[block_ids[i]] = next_id
next_id++
}
}
for i = 0; i < length; i++ {
block_ids[i] = byte(new_id[block_ids[i]])
assert(uint(block_ids[i]) < num_histograms)
}
assert(uint(next_id) <= num_histograms)
return uint(next_id)
}
func buildBlockHistogramsLiteral(data []byte, length uint, block_ids []byte, num_histograms uint, histograms []histogramLiteral) {
var i uint
clearHistogramsLiteral(histograms, num_histograms)
for i = 0; i < length; i++ {
histogramAddLiteral(&histograms[block_ids[i]], uint(data[i]))
}
}
var clusterBlocksLiteral_kInvalidIndex uint32 = math.MaxUint32
func clusterBlocksLiteral(data []byte, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
var histogram_symbols []uint32 = make([]uint32, num_blocks)
var block_lengths []uint32 = make([]uint32, num_blocks)
var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
var all_histograms_size uint = 0
var all_histograms_capacity uint = expected_num_clusters
var all_histograms []histogramLiteral = make([]histogramLiteral, all_histograms_capacity)
var cluster_size_size uint = 0
var cluster_size_capacity uint = expected_num_clusters
var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
var num_clusters uint = 0
var histograms []histogramLiteral = make([]histogramLiteral, brotli_min_size_t(num_blocks, histogramsPerBatch))
var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
var pairs_capacity uint = max_num_pairs + 1
var pairs []histogramPair = make([]histogramPair, pairs_capacity)
var pos uint = 0
var clusters []uint32
var num_final_clusters uint
var new_index []uint32
var i uint
var sizes = [histogramsPerBatch]uint32{0}
var new_clusters = [histogramsPerBatch]uint32{0}
var symbols = [histogramsPerBatch]uint32{0}
var remap = [histogramsPerBatch]uint32{0}
for i := 0; i < int(num_blocks); i++ {
block_lengths[i] = 0
}
{
var block_idx uint = 0
for i = 0; i < length; i++ {
assert(block_idx < num_blocks)
block_lengths[block_idx]++
if i+1 == length || block_ids[i] != block_ids[i+1] {
block_idx++
}
}
assert(block_idx == num_blocks)
}
for i = 0; i < num_blocks; i += histogramsPerBatch {
var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
var num_new_clusters uint
var j uint
for j = 0; j < num_to_combine; j++ {
var k uint
histogramClearLiteral(&histograms[j])
for k = 0; uint32(k) < block_lengths[i+j]; k++ {
histogramAddLiteral(&histograms[j], uint(data[pos]))
pos++
}
histograms[j].bit_cost_ = populationCostLiteral(&histograms[j])
new_clusters[j] = uint32(j)
symbols[j] = uint32(j)
sizes[j] = 1
}
num_new_clusters = histogramCombineLiteral(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
var _new_size uint
if all_histograms_capacity == 0 {
_new_size = all_histograms_size + num_new_clusters
} else {
_new_size = all_histograms_capacity
}
var new_array []histogramLiteral
for _new_size < (all_histograms_size + num_new_clusters) {
_new_size *= 2
}
new_array = make([]histogramLiteral, _new_size)
if all_histograms_capacity != 0 {
copy(new_array, all_histograms[:all_histograms_capacity])
}
all_histograms = new_array
all_histograms_capacity = _new_size
}
brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
for j = 0; j < num_new_clusters; j++ {
all_histograms[all_histograms_size] = histograms[new_clusters[j]]
all_histograms_size++
cluster_size[cluster_size_size] = sizes[new_clusters[j]]
cluster_size_size++
remap[new_clusters[j]] = uint32(j)
}
for j = 0; j < num_to_combine; j++ {
histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
}
num_clusters += num_new_clusters
assert(num_clusters == cluster_size_size)
assert(num_clusters == all_histograms_size)
}
histograms = nil
max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
if pairs_capacity < max_num_pairs+1 {
pairs = nil
pairs = make([]histogramPair, (max_num_pairs + 1))
}
clusters = make([]uint32, num_clusters)
for i = 0; i < num_clusters; i++ {
clusters[i] = uint32(i)
}
num_final_clusters = histogramCombineLiteral(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
pairs = nil
cluster_size = nil
new_index = make([]uint32, num_clusters)
for i = 0; i < num_clusters; i++ {
new_index[i] = clusterBlocksLiteral_kInvalidIndex
}
pos = 0
{
var next_index uint32 = 0
for i = 0; i < num_blocks; i++ {
var histo histogramLiteral
var j uint
var best_out uint32
var best_bits float64
histogramClearLiteral(&histo)
for j = 0; uint32(j) < block_lengths[i]; j++ {
histogramAddLiteral(&histo, uint(data[pos]))
pos++
}
if i == 0 {
best_out = histogram_symbols[0]
} else {
best_out = histogram_symbols[i-1]
}
best_bits = histogramBitCostDistanceLiteral(&histo, &all_histograms[best_out])
for j = 0; j < num_final_clusters; j++ {
var cur_bits float64 = histogramBitCostDistanceLiteral(&histo, &all_histograms[clusters[j]])
if cur_bits < best_bits {
best_bits = cur_bits
best_out = clusters[j]
}
}
histogram_symbols[i] = best_out
if new_index[best_out] == clusterBlocksLiteral_kInvalidIndex {
new_index[best_out] = next_index
next_index++
}
}
}
clusters = nil
all_histograms = nil
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
{
var cur_length uint32 = 0
var block_idx uint = 0
var max_type byte = 0
for i = 0; i < num_blocks; i++ {
cur_length += block_lengths[i]
if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
var id byte = byte(new_index[histogram_symbols[i]])
split.types[block_idx] = id
split.lengths[block_idx] = cur_length
max_type = brotli_max_uint8_t(max_type, id)
cur_length = 0
block_idx++
}
}
split.num_blocks = block_idx
split.num_types = uint(max_type) + 1
}
new_index = nil
block_lengths = nil
histogram_symbols = nil
}
func splitByteVectorLiteral(data []byte, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
var data_size uint = histogramDataSizeLiteral()
var num_histograms uint = length/literals_per_histogram + 1
var histograms []histogramLiteral
if num_histograms > max_histograms {
num_histograms = max_histograms
}
if length == 0 {
split.num_types = 1
return
} else if length < kMinLengthForBlockSplitting {
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
split.num_types = 1
split.types[split.num_blocks] = 0
split.lengths[split.num_blocks] = uint32(length)
split.num_blocks++
return
}
histograms = make([]histogramLiteral, num_histograms)
/* Find good entropy codes. */
initialEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms)
refineEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms)
{
var block_ids []byte = make([]byte, length)
var num_blocks uint = 0
var bitmaplen uint = (num_histograms + 7) >> 3
var insert_cost []float64 = make([]float64, (data_size * num_histograms))
var cost []float64 = make([]float64, num_histograms)
var switch_signal []byte = make([]byte, (length * bitmaplen))
var new_id []uint16 = make([]uint16, num_histograms)
var iters uint
if params.quality < hqZopflificationQuality {
iters = 3
} else {
iters = 10
}
/* Find a good path through literals with the good entropy codes. */
var i uint
for i = 0; i < iters; i++ {
num_blocks = findBlocksLiteral(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
num_histograms = remapBlockIdsLiteral(block_ids, length, new_id, num_histograms)
buildBlockHistogramsLiteral(data, length, block_ids, num_histograms, histograms)
}
insert_cost = nil
cost = nil
switch_signal = nil
new_id = nil
histograms = nil
clusterBlocksLiteral(data, length, num_blocks, block_ids, split)
block_ids = nil
}
}

1300
vendor/github.com/andybalholm/brotli/brotli_bit_stream.go generated vendored Normal file

File diff suppressed because it is too large

30
vendor/github.com/andybalholm/brotli/cluster.go generated vendored Normal file

@ -0,0 +1,30 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions for clustering similar histograms together. */
type histogramPair struct {
idx1 uint32
idx2 uint32
cost_combo float64
cost_diff float64
}
func histogramPairIsLess(p1 *histogramPair, p2 *histogramPair) bool {
if p1.cost_diff != p2.cost_diff {
return p1.cost_diff > p2.cost_diff
}
return (p1.idx2 - p1.idx1) > (p2.idx2 - p2.idx1)
}
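/* "Less" here means "worse": a pair with the larger cost_diff (the smaller
bit-cost saving) compares as less, so histogramCombine* keeps the pair with
the largest saving at pairs[0]. */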
/* Returns entropy reduction of the context map when we combine two clusters. */
func clusterCostDiff(size_a uint, size_b uint) float64 {
var size_c uint = size_a + size_b
return float64(size_a)*fastLog2(size_a) + float64(size_b)*fastLog2(size_b) - float64(size_c)*fastLog2(size_c)
}
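/* With cluster sizes a and b this evaluates
a*log2(a) + b*log2(b) - (a+b)*log2(a+b), which is never positive; the
compareAndPushToQueue* helpers weight it by 0.5 when scoring a merge. */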

164
vendor/github.com/andybalholm/brotli/cluster_command.go generated vendored Normal file

@ -0,0 +1,164 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
var is_good_pair bool = false
var p histogramPair
p.idx2 = 0
p.idx1 = p.idx2
p.cost_combo = 0
p.cost_diff = p.cost_combo
if idx1 == idx2 {
return
}
if idx2 < idx1 {
var t uint32 = idx2
idx2 = idx1
idx1 = t
}
p.idx1 = idx1
p.idx2 = idx2
p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2]))
p.cost_diff -= out[idx1].bit_cost_
p.cost_diff -= out[idx2].bit_cost_
if out[idx1].total_count_ == 0 {
p.cost_combo = out[idx2].bit_cost_
is_good_pair = true
} else if out[idx2].total_count_ == 0 {
p.cost_combo = out[idx1].bit_cost_
is_good_pair = true
} else {
var threshold float64
if *num_pairs == 0 {
threshold = 1e99
} else {
threshold = brotli_max_double(0.0, pairs[0].cost_diff)
}
var combo histogramCommand = out[idx1]
var cost_combo float64
histogramAddHistogramCommand(&combo, &out[idx2])
cost_combo = populationCostCommand(&combo)
if cost_combo < threshold-p.cost_diff {
p.cost_combo = cost_combo
is_good_pair = true
}
}
if is_good_pair {
p.cost_diff += p.cost_combo
if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) {
/* Replace the top of the queue if needed. */
if *num_pairs < max_num_pairs {
pairs[*num_pairs] = pairs[0]
(*num_pairs)++
}
pairs[0] = p
} else if *num_pairs < max_num_pairs {
pairs[*num_pairs] = p
(*num_pairs)++
}
}
}
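/* histogramCombineCommand greedily merges clusters: while the best queued
pair still lowers the total bit cost it merges that pair and re-scores the
affected pairs; once no profitable merge is left it keeps merging the
cheapest pairs until at most max_clusters clusters remain. */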
func histogramCombineCommand(out []histogramCommand, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint {
var cost_diff_threshold float64 = 0.0
var min_cluster_size uint = 1
var num_pairs uint = 0
{
/* We maintain a vector of histogram pairs, with the property that the pair
with the maximum bit cost reduction is the first. */
var idx1 uint
for idx1 = 0; idx1 < num_clusters; idx1++ {
var idx2 uint
for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ {
compareAndPushToQueueCommand(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs)
}
}
}
for num_clusters > min_cluster_size {
var best_idx1 uint32
var best_idx2 uint32
var i uint
if pairs[0].cost_diff >= cost_diff_threshold {
cost_diff_threshold = 1e99
min_cluster_size = max_clusters
continue
}
/* Take the best pair from the top of heap. */
best_idx1 = pairs[0].idx1
best_idx2 = pairs[0].idx2
histogramAddHistogramCommand(&out[best_idx1], &out[best_idx2])
out[best_idx1].bit_cost_ = pairs[0].cost_combo
cluster_size[best_idx1] += cluster_size[best_idx2]
for i = 0; i < symbols_size; i++ {
if symbols[i] == best_idx2 {
symbols[i] = best_idx1
}
}
for i = 0; i < num_clusters; i++ {
if clusters[i] == best_idx2 {
copy(clusters[i:], clusters[i+1:][:num_clusters-i-1])
break
}
}
num_clusters--
{
/* Remove pairs intersecting the just combined best pair. */
var copy_to_idx uint = 0
for i = 0; i < num_pairs; i++ {
var p *histogramPair = &pairs[i]
if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 {
/* Remove invalid pair from the queue. */
continue
}
if histogramPairIsLess(&pairs[0], p) {
/* Replace the top of the queue if needed. */
var front histogramPair = pairs[0]
pairs[0] = *p
pairs[copy_to_idx] = front
} else {
pairs[copy_to_idx] = *p
}
copy_to_idx++
}
num_pairs = copy_to_idx
}
/* Push new pairs formed with the combined histogram to the heap. */
for i = 0; i < num_clusters; i++ {
compareAndPushToQueueCommand(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs)
}
}
return num_clusters
}
/* Returns the bit cost of moving the histogram from its current cluster to
the candidate cluster. */
func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *histogramCommand) float64 {
if histogram.total_count_ == 0 {
return 0.0
} else {
var tmp histogramCommand = *histogram
histogramAddHistogramCommand(&tmp, candidate)
return populationCostCommand(&tmp) - candidate.bit_cost_
}
}


@ -0,0 +1,326 @@
package brotli
import "math"
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
var is_good_pair bool = false
var p histogramPair
p.idx2 = 0
p.idx1 = p.idx2
p.cost_combo = 0
p.cost_diff = p.cost_combo
if idx1 == idx2 {
return
}
if idx2 < idx1 {
var t uint32 = idx2
idx2 = idx1
idx1 = t
}
p.idx1 = idx1
p.idx2 = idx2
p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2]))
p.cost_diff -= out[idx1].bit_cost_
p.cost_diff -= out[idx2].bit_cost_
if out[idx1].total_count_ == 0 {
p.cost_combo = out[idx2].bit_cost_
is_good_pair = true
} else if out[idx2].total_count_ == 0 {
p.cost_combo = out[idx1].bit_cost_
is_good_pair = true
} else {
var threshold float64
if *num_pairs == 0 {
threshold = 1e99
} else {
threshold = brotli_max_double(0.0, pairs[0].cost_diff)
}
var combo histogramDistance = out[idx1]
var cost_combo float64
histogramAddHistogramDistance(&combo, &out[idx2])
cost_combo = populationCostDistance(&combo)
if cost_combo < threshold-p.cost_diff {
p.cost_combo = cost_combo
is_good_pair = true
}
}
if is_good_pair {
p.cost_diff += p.cost_combo
if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) {
/* Replace the top of the queue if needed. */
if *num_pairs < max_num_pairs {
pairs[*num_pairs] = pairs[0]
(*num_pairs)++
}
pairs[0] = p
} else if *num_pairs < max_num_pairs {
pairs[*num_pairs] = p
(*num_pairs)++
}
}
}
func histogramCombineDistance(out []histogramDistance, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint {
var cost_diff_threshold float64 = 0.0
var min_cluster_size uint = 1
var num_pairs uint = 0
{
/* We maintain a vector of histogram pairs, with the property that the pair
with the maximum bit cost reduction is the first. */
var idx1 uint
for idx1 = 0; idx1 < num_clusters; idx1++ {
var idx2 uint
for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ {
compareAndPushToQueueDistance(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs)
}
}
}
for num_clusters > min_cluster_size {
var best_idx1 uint32
var best_idx2 uint32
var i uint
if pairs[0].cost_diff >= cost_diff_threshold {
cost_diff_threshold = 1e99
min_cluster_size = max_clusters
continue
}
/* Take the best pair from the top of heap. */
best_idx1 = pairs[0].idx1
best_idx2 = pairs[0].idx2
histogramAddHistogramDistance(&out[best_idx1], &out[best_idx2])
out[best_idx1].bit_cost_ = pairs[0].cost_combo
cluster_size[best_idx1] += cluster_size[best_idx2]
for i = 0; i < symbols_size; i++ {
if symbols[i] == best_idx2 {
symbols[i] = best_idx1
}
}
for i = 0; i < num_clusters; i++ {
if clusters[i] == best_idx2 {
copy(clusters[i:], clusters[i+1:][:num_clusters-i-1])
break
}
}
num_clusters--
{
/* Remove pairs intersecting the just combined best pair. */
var copy_to_idx uint = 0
for i = 0; i < num_pairs; i++ {
var p *histogramPair = &pairs[i]
if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 {
/* Remove invalid pair from the queue. */
continue
}
if histogramPairIsLess(&pairs[0], p) {
/* Replace the top of the queue if needed. */
var front histogramPair = pairs[0]
pairs[0] = *p
pairs[copy_to_idx] = front
} else {
pairs[copy_to_idx] = *p
}
copy_to_idx++
}
num_pairs = copy_to_idx
}
/* Push new pairs formed with the combined histogram to the heap. */
for i = 0; i < num_clusters; i++ {
compareAndPushToQueueDistance(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs)
}
}
return num_clusters
}
/* Returns the bit cost of moving the histogram from its current cluster to
the candidate cluster. */
func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *histogramDistance) float64 {
if histogram.total_count_ == 0 {
return 0.0
} else {
var tmp histogramDistance = *histogram
histogramAddHistogramDistance(&tmp, candidate)
return populationCostDistance(&tmp) - candidate.bit_cost_
}
}
/* Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) {
var i uint
for i = 0; i < in_size; i++ {
var best_out uint32
if i == 0 {
best_out = symbols[0]
} else {
best_out = symbols[i-1]
}
var best_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[best_out])
var j uint
for j = 0; j < num_clusters; j++ {
var cur_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[clusters[j]])
if cur_bits < best_bits {
best_bits = cur_bits
best_out = clusters[j]
}
}
symbols[i] = best_out
}
/* Recompute each out based on raw and symbols. */
for i = 0; i < num_clusters; i++ {
histogramClearDistance(&out[clusters[i]])
}
for i = 0; i < in_size; i++ {
histogramAddHistogramDistance(&out[symbols[i]], &in[i])
}
}
/* Reorders elements of the out[0..length) array and changes values in
symbols[0..length) array in the following way:
* when called, symbols[] contains indexes into out[], and has N unique
values (possibly N < length)
* on return, symbols'[i] = f(symbols[i]) and
out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length,
where f is a bijection between the range of symbols[] and [0..N), and
the first occurrences of values in symbols'[i] come in consecutive
increasing order.
Returns N, the number of unique values in symbols[]. */
var histogramReindexDistance_kInvalidIndex uint32 = math.MaxUint32
func histogramReindexDistance(out []histogramDistance, symbols []uint32, length uint) uint {
var new_index []uint32 = make([]uint32, length)
var next_index uint32
var tmp []histogramDistance
var i uint
for i = 0; i < length; i++ {
new_index[i] = histogramReindexDistance_kInvalidIndex
}
next_index = 0
for i = 0; i < length; i++ {
if new_index[symbols[i]] == histogramReindexDistance_kInvalidIndex {
new_index[symbols[i]] = next_index
next_index++
}
}
/* TODO: by using idea of "cycle-sort" we can avoid allocation of
tmp and reduce the number of copying by the factor of 2. */
tmp = make([]histogramDistance, next_index)
next_index = 0
for i = 0; i < length; i++ {
if new_index[symbols[i]] == next_index {
tmp[next_index] = out[symbols[i]]
next_index++
}
symbols[i] = new_index[symbols[i]]
}
new_index = nil
for i = 0; uint32(i) < next_index; i++ {
out[i] = tmp[i]
}
tmp = nil
return uint(next_index)
}
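/* For example, if symbols = {3, 1, 3, 0}, the first occurrences in order are
3, 1, 0, so f maps 3->0, 1->1 and 0->2. On return symbols = {0, 1, 0, 2},
out[0..3) holds the old out[3], out[1] and out[0] in that order, and the
function returns 3. */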
func clusterHistogramsDistance(in []histogramDistance, in_size uint, max_histograms uint, out []histogramDistance, out_size *uint, histogram_symbols []uint32) {
var cluster_size []uint32 = make([]uint32, in_size)
var clusters []uint32 = make([]uint32, in_size)
var num_clusters uint = 0
var max_input_histograms uint = 64
var pairs_capacity uint = max_input_histograms * max_input_histograms / 2
var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1))
var i uint
/* For the first pass of clustering, we allow all pairs. */
for i = 0; i < in_size; i++ {
cluster_size[i] = 1
}
for i = 0; i < in_size; i++ {
out[i] = in[i]
out[i].bit_cost_ = populationCostDistance(&in[i])
histogram_symbols[i] = uint32(i)
}
for i = 0; i < in_size; i += max_input_histograms {
var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms)
var num_new_clusters uint
var j uint
for j = 0; j < num_to_combine; j++ {
clusters[num_clusters+j] = uint32(i + j)
}
num_new_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity)
num_clusters += num_new_clusters
}
{
/* For the second pass, we limit the total number of histogram pairs.
After this limit is reached, we only keep searching for the best pair. */
var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
if pairs_capacity < (max_num_pairs + 1) {
var _new_size uint
if pairs_capacity == 0 {
_new_size = max_num_pairs + 1
} else {
_new_size = pairs_capacity
}
var new_array []histogramPair
for _new_size < (max_num_pairs + 1) {
_new_size *= 2
}
new_array = make([]histogramPair, _new_size)
if pairs_capacity != 0 {
copy(new_array, pairs[:pairs_capacity])
}
pairs = new_array
pairs_capacity = _new_size
}
/* Collapse similar histograms. */
num_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs)
}
pairs = nil
cluster_size = nil
/* Find the optimal map from original histograms to the final ones. */
histogramRemapDistance(in, in_size, clusters, num_clusters, out, histogram_symbols)
clusters = nil
/* Convert the context map to a canonical form. */
*out_size = histogramReindexDistance(out, histogram_symbols, in_size)
}
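/* Shape of the clustering above, e.g. for in_size = 200: the first pass
combines histograms within chunks of at most max_input_histograms = 64 inputs
(here 64+64+64+8), which bounds the pair queue at 64*64/2 entries per chunk;
the second pass re-clusters the surviving clusters globally with at most
min(64*n, (n/2)*n) candidate pairs, where n is the cluster count after the
first pass. histogramRemapDistance then assigns every input histogram to its
cheapest surviving cluster and histogramReindexDistance renumbers the result
into out[0..*out_size). */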

326
vendor/github.com/andybalholm/brotli/cluster_literal.go generated vendored Normal file

@ -0,0 +1,326 @@
package brotli
import "math"
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
var is_good_pair bool = false
var p histogramPair
p.idx2 = 0
p.idx1 = p.idx2
p.cost_combo = 0
p.cost_diff = p.cost_combo
if idx1 == idx2 {
return
}
if idx2 < idx1 {
var t uint32 = idx2
idx2 = idx1
idx1 = t
}
p.idx1 = idx1
p.idx2 = idx2
p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2]))
p.cost_diff -= out[idx1].bit_cost_
p.cost_diff -= out[idx2].bit_cost_
if out[idx1].total_count_ == 0 {
p.cost_combo = out[idx2].bit_cost_
is_good_pair = true
} else if out[idx2].total_count_ == 0 {
p.cost_combo = out[idx1].bit_cost_
is_good_pair = true
} else {
var threshold float64
if *num_pairs == 0 {
threshold = 1e99
} else {
threshold = brotli_max_double(0.0, pairs[0].cost_diff)
}
var combo histogramLiteral = out[idx1]
var cost_combo float64
histogramAddHistogramLiteral(&combo, &out[idx2])
cost_combo = populationCostLiteral(&combo)
if cost_combo < threshold-p.cost_diff {
p.cost_combo = cost_combo
is_good_pair = true
}
}
if is_good_pair {
p.cost_diff += p.cost_combo
if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) {
/* Replace the top of the queue if needed. */
if *num_pairs < max_num_pairs {
pairs[*num_pairs] = pairs[0]
(*num_pairs)++
}
pairs[0] = p
} else if *num_pairs < max_num_pairs {
pairs[*num_pairs] = p
(*num_pairs)++
}
}
}
func histogramCombineLiteral(out []histogramLiteral, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint {
var cost_diff_threshold float64 = 0.0
var min_cluster_size uint = 1
var num_pairs uint = 0
{
/* We maintain a vector of histogram pairs, with the property that the pair
with the maximum bit cost reduction is the first. */
var idx1 uint
for idx1 = 0; idx1 < num_clusters; idx1++ {
var idx2 uint
for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ {
compareAndPushToQueueLiteral(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs)
}
}
}
for num_clusters > min_cluster_size {
var best_idx1 uint32
var best_idx2 uint32
var i uint
if pairs[0].cost_diff >= cost_diff_threshold {
cost_diff_threshold = 1e99
min_cluster_size = max_clusters
continue
}
/* Take the best pair from the top of heap. */
best_idx1 = pairs[0].idx1
best_idx2 = pairs[0].idx2
histogramAddHistogramLiteral(&out[best_idx1], &out[best_idx2])
out[best_idx1].bit_cost_ = pairs[0].cost_combo
cluster_size[best_idx1] += cluster_size[best_idx2]
for i = 0; i < symbols_size; i++ {
if symbols[i] == best_idx2 {
symbols[i] = best_idx1
}
}
for i = 0; i < num_clusters; i++ {
if clusters[i] == best_idx2 {
copy(clusters[i:], clusters[i+1:][:num_clusters-i-1])
break
}
}
num_clusters--
{
/* Remove pairs intersecting the just combined best pair. */
var copy_to_idx uint = 0
for i = 0; i < num_pairs; i++ {
var p *histogramPair = &pairs[i]
if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 {
/* Remove invalid pair from the queue. */
continue
}
if histogramPairIsLess(&pairs[0], p) {
/* Replace the top of the queue if needed. */
var front histogramPair = pairs[0]
pairs[0] = *p
pairs[copy_to_idx] = front
} else {
pairs[copy_to_idx] = *p
}
copy_to_idx++
}
num_pairs = copy_to_idx
}
/* Push new pairs formed with the combined histogram to the heap. */
for i = 0; i < num_clusters; i++ {
compareAndPushToQueueLiteral(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs)
}
}
return num_clusters
}
/* What is the bit cost of moving histogram from cur_symbol to candidate. */
func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *histogramLiteral) float64 {
if histogram.total_count_ == 0 {
return 0.0
} else {
var tmp histogramLiteral = *histogram
histogramAddHistogramLiteral(&tmp, candidate)
return populationCostLiteral(&tmp) - candidate.bit_cost_
}
}
/* Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) {
var i uint
for i = 0; i < in_size; i++ {
var best_out uint32
if i == 0 {
best_out = symbols[0]
} else {
best_out = symbols[i-1]
}
var best_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[best_out])
var j uint
for j = 0; j < num_clusters; j++ {
var cur_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[clusters[j]])
if cur_bits < best_bits {
best_bits = cur_bits
best_out = clusters[j]
}
}
symbols[i] = best_out
}
/* Recompute each out based on raw and symbols. */
for i = 0; i < num_clusters; i++ {
histogramClearLiteral(&out[clusters[i]])
}
for i = 0; i < in_size; i++ {
histogramAddHistogramLiteral(&out[symbols[i]], &in[i])
}
}
/* Reorders elements of the out[0..length) array and changes values in
symbols[0..length) array in the following way:
* when called, symbols[] contains indexes into out[], and has N unique
values (possibly N < length)
* on return, symbols'[i] = f(symbols[i]) and
out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length,
where f is a bijection between the range of symbols[] and [0..N), and
the first occurrences of values in symbols'[i] come in consecutive
increasing order.
Returns N, the number of unique values in symbols[]. */
var histogramReindexLiteral_kInvalidIndex uint32 = math.MaxUint32
func histogramReindexLiteral(out []histogramLiteral, symbols []uint32, length uint) uint {
var new_index []uint32 = make([]uint32, length)
var next_index uint32
var tmp []histogramLiteral
var i uint
for i = 0; i < length; i++ {
new_index[i] = histogramReindexLiteral_kInvalidIndex
}
next_index = 0
for i = 0; i < length; i++ {
if new_index[symbols[i]] == histogramReindexLiteral_kInvalidIndex {
new_index[symbols[i]] = next_index
next_index++
}
}
/* TODO: by using idea of "cycle-sort" we can avoid allocation of
tmp and reduce the number of copying by the factor of 2. */
tmp = make([]histogramLiteral, next_index)
next_index = 0
for i = 0; i < length; i++ {
if new_index[symbols[i]] == next_index {
tmp[next_index] = out[symbols[i]]
next_index++
}
symbols[i] = new_index[symbols[i]]
}
new_index = nil
for i = 0; uint32(i) < next_index; i++ {
out[i] = tmp[i]
}
tmp = nil
return uint(next_index)
}
func clusterHistogramsLiteral(in []histogramLiteral, in_size uint, max_histograms uint, out []histogramLiteral, out_size *uint, histogram_symbols []uint32) {
var cluster_size []uint32 = make([]uint32, in_size)
var clusters []uint32 = make([]uint32, in_size)
var num_clusters uint = 0
var max_input_histograms uint = 64
var pairs_capacity uint = max_input_histograms * max_input_histograms / 2
var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1))
var i uint
/* For the first pass of clustering, we allow all pairs. */
for i = 0; i < in_size; i++ {
cluster_size[i] = 1
}
for i = 0; i < in_size; i++ {
out[i] = in[i]
out[i].bit_cost_ = populationCostLiteral(&in[i])
histogram_symbols[i] = uint32(i)
}
for i = 0; i < in_size; i += max_input_histograms {
var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms)
var num_new_clusters uint
var j uint
for j = 0; j < num_to_combine; j++ {
clusters[num_clusters+j] = uint32(i + j)
}
num_new_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity)
num_clusters += num_new_clusters
}
{
/* For the second pass, we limit the total number of histogram pairs.
After this limit is reached, we only keep searching for the best pair. */
var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
if pairs_capacity < (max_num_pairs + 1) {
var _new_size uint
if pairs_capacity == 0 {
_new_size = max_num_pairs + 1
} else {
_new_size = pairs_capacity
}
var new_array []histogramPair
for _new_size < (max_num_pairs + 1) {
_new_size *= 2
}
new_array = make([]histogramPair, _new_size)
if pairs_capacity != 0 {
copy(new_array, pairs[:pairs_capacity])
}
pairs = new_array
pairs_capacity = _new_size
}
/* Collapse similar histograms. */
num_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs)
}
pairs = nil
cluster_size = nil
/* Find the optimal map from original histograms to the final ones. */
histogramRemapLiteral(in, in_size, clusters, num_clusters, out, histogram_symbols)
clusters = nil
/* Convert the context map to a canonical form. */
*out_size = histogramReindexLiteral(out, histogram_symbols, in_size)
}

254
vendor/github.com/andybalholm/brotli/command.go generated vendored Normal file

@ -0,0 +1,254 @@
package brotli
var kInsBase = []uint32{
0,
1,
2,
3,
4,
5,
6,
8,
10,
14,
18,
26,
34,
50,
66,
98,
130,
194,
322,
578,
1090,
2114,
6210,
22594,
}
var kInsExtra = []uint32{
0,
0,
0,
0,
0,
0,
1,
1,
2,
2,
3,
3,
4,
4,
5,
5,
6,
7,
8,
9,
10,
12,
14,
24,
}
var kCopyBase = []uint32{
2,
3,
4,
5,
6,
7,
8,
9,
10,
12,
14,
18,
22,
30,
38,
54,
70,
102,
134,
198,
326,
582,
1094,
2118,
}
var kCopyExtra = []uint32{
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
2,
2,
3,
3,
4,
4,
5,
5,
6,
7,
8,
9,
10,
24,
}
func getInsertLengthCode(insertlen uint) uint16 {
if insertlen < 6 {
return uint16(insertlen)
} else if insertlen < 130 {
var nbits uint32 = log2FloorNonZero(insertlen-2) - 1
return uint16((nbits << 1) + uint32((insertlen-2)>>nbits) + 2)
} else if insertlen < 2114 {
return uint16(log2FloorNonZero(insertlen-66) + 10)
} else if insertlen < 6210 {
return 21
} else if insertlen < 22594 {
return 22
} else {
return 23
}
}
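/* For example, insertlen = 70 falls in the second branch: nbits =
log2FloorNonZero(68) - 1 = 5, so the code is (5 << 1) + (68 >> 5) + 2 = 14,
with kInsBase[14] = 66 and kInsExtra[14] = 5, i.e. 70 is coded as symbol 14
plus the 5 extra bits 70 - 66 = 4. */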
func getCopyLengthCode(copylen uint) uint16 {
if copylen < 10 {
return uint16(copylen - 2)
} else if copylen < 134 {
var nbits uint32 = log2FloorNonZero(copylen-6) - 1
return uint16((nbits << 1) + uint32((copylen-6)>>nbits) + 4)
} else if copylen < 2118 {
return uint16(log2FloorNonZero(copylen-70) + 12)
} else {
return 23
}
}
func combineLengthCodes(inscode uint16, copycode uint16, use_last_distance bool) uint16 {
var bits64 uint16 = uint16(copycode&0x7 | (inscode&0x7)<<3)
if use_last_distance && inscode < 8 && copycode < 16 {
if copycode < 8 {
return bits64
} else {
return bits64 | 64
}
} else {
/* Specification: 5 Encoding of ... (last table) */
/* offset = 2 * index, where index is in range [0..8] */
var offset uint32 = 2 * ((uint32(copycode) >> 3) + 3*(uint32(inscode)>>3))
/* All values in specification are K * 64,
where K = [2, 3, 6, 4, 5, 8, 7, 9, 10],
i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9],
K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D.
All values in D require only 2 bits to encode.
Magic constant is shifted 6 bits left, to avoid final multiplication. */
offset = (offset << 5) + 0x40 + ((0x520D40 >> offset) & 0xC0)
return uint16(offset | uint32(bits64))
}
}
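/* For example, with use_last_distance == false, inscode = 2 and copycode = 10:
bits64 = (10 & 7) | (2 & 7)<<3 = 18, and the cell index is
(10 >> 3) + 3*(2 >> 3) = 1, so offset = 2 and
(2 << 5) + 0x40 + ((0x520D40 >> 2) & 0xC0) = 192 = 3 * 64, matching K = 3 for
index 1 in the list above; the combined code is 192 | 18 = 210. */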
func getLengthCode(insertlen uint, copylen uint, use_last_distance bool, code *uint16) {
var inscode uint16 = getInsertLengthCode(insertlen)
var copycode uint16 = getCopyLengthCode(copylen)
*code = combineLengthCodes(inscode, copycode, use_last_distance)
}
func getInsertBase(inscode uint16) uint32 {
return kInsBase[inscode]
}
func getInsertExtra(inscode uint16) uint32 {
return kInsExtra[inscode]
}
func getCopyBase(copycode uint16) uint32 {
return kCopyBase[copycode]
}
func getCopyExtra(copycode uint16) uint32 {
return kCopyExtra[copycode]
}
type command struct {
insert_len_ uint32
copy_len_ uint32
dist_extra_ uint32
cmd_prefix_ uint16
dist_prefix_ uint16
}
/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. */
func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) {
/* Don't rely on signed int representation, use honest casts. */
var delta uint32 = uint32(byte(int8(copylen_code_delta)))
cmd.insert_len_ = uint32(insertlen)
cmd.copy_len_ = uint32(uint32(copylen) | delta<<25)
/* The distance prefix and extra bits are stored in this Command as if
npostfix and ndirect were 0, they are only recomputed later after the
clustering if needed. */
prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_)
getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_)
return cmd
}
func makeInsertCommand(insertlen uint) (cmd command) {
cmd.insert_len_ = uint32(insertlen)
cmd.copy_len_ = 4 << 25
cmd.dist_extra_ = 0
cmd.dist_prefix_ = numDistanceShortCodes
getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_)
return cmd
}
func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 {
if uint32(self.dist_prefix_&0x3FF) < numDistanceShortCodes+dist.num_direct_distance_codes {
return uint32(self.dist_prefix_) & 0x3FF
} else {
var dcode uint32 = uint32(self.dist_prefix_) & 0x3FF
var nbits uint32 = uint32(self.dist_prefix_) >> 10
var extra uint32 = self.dist_extra_
var postfix_mask uint32 = (1 << dist.distance_postfix_bits) - 1
var hcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) >> dist.distance_postfix_bits
var lcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) & postfix_mask
var offset uint32 = ((2 + (hcode & 1)) << nbits) - 4
return ((offset + extra) << dist.distance_postfix_bits) + lcode + dist.num_direct_distance_codes + numDistanceShortCodes
}
}
func commandDistanceContext(self *command) uint32 {
var r uint32 = uint32(self.cmd_prefix_) >> 6
var c uint32 = uint32(self.cmd_prefix_) & 7
if (r == 0 || r == 2 || r == 4 || r == 7) && (c <= 2) {
return c
}
return 3
}
func commandCopyLen(self *command) uint32 {
return self.copy_len_ & 0x1FFFFFF
}
func commandCopyLenCode(self *command) uint32 {
var modifier uint32 = self.copy_len_ >> 25
var delta int32 = int32(int8(byte(modifier | (modifier&0x40)<<1)))
return uint32(int32(self.copy_len_&0x1FFFFFF) + delta)
}
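/* Packing used by the accessors above: copy_len_ keeps the copy length in its
low 25 bits and copylen_code_delta in bits 25..31 as a 7-bit value whose sign
bit is restored via (modifier & 0x40) << 1. For example, makeCommand with
copylen = 12 and copylen_code_delta = -1 stores 12 | 0x7F<<25; commandCopyLen
then returns 12 and commandCopyLenCode returns 11. makeInsertCommand stores
4 << 25, i.e. copy length 0 with code delta +4, so the length code is computed
as if the copy length were 4. */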

834
vendor/github.com/andybalholm/brotli/compress_fragment.go generated vendored Normal file

@ -0,0 +1,834 @@
package brotli
import "encoding/binary"
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function for fast encoding of an input fragment, independently from the input
history. This function uses one-pass processing: when we find a backward
match, we immediately emit the corresponding command and literal codes to
the bit stream.
Adapted from the CompressFragment() function in
https://github.com/google/snappy/blob/master/snappy.cc */
const maxDistance_compress_fragment = 262128
func hash5(p []byte, shift uint) uint32 {
var h uint64 = (binary.LittleEndian.Uint64(p) << 24) * uint64(kHashMul32)
return uint32(h >> shift)
}
func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 {
assert(offset >= 0)
assert(offset <= 3)
{
var h uint64 = ((v >> uint(8*offset)) << 24) * uint64(kHashMul32)
return uint32(h >> shift)
}
}
func isMatch5(p1 []byte, p2 []byte) bool {
return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) &&
p1[4] == p2[4]
}
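/* The shift left by 24 drops the three most significant bytes of the
little-endian load, so only the five bytes that isMatch5 compares contribute
to the hash; multiplying by kHashMul32 and keeping the top 64 - shift bits
(shift = 64 - table_bits in the caller) yields the hash table index. */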
/* Builds a literal prefix code into "depths" and "bits" based on the statistics
of the "input" string and stores it into the bit stream.
Note that the prefix code here is built from the pre-LZ77 input, therefore
we can only approximate the statistics of the actual literal stream.
Moreover, for long inputs we build a histogram from a sample of the input
and thus have to assign a non-zero depth for each literal.
Returns estimated compression ratio millibytes/char for encoding given input
with generated code. */
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint {
var histogram = [256]uint32{0}
var histogram_total uint
var i uint
if input_size < 1<<15 {
for i = 0; i < input_size; i++ {
histogram[input[i]]++
}
histogram_total = input_size
for i = 0; i < 256; i++ {
/* We weigh the first 11 samples with weight 3 to account for the
balancing effect of the LZ77 phase on the histogram. */
var adjust uint32 = 2 * brotli_min_uint32_t(histogram[i], 11)
histogram[i] += adjust
histogram_total += uint(adjust)
}
} else {
const kSampleRate uint = 29
for i = 0; i < input_size; i += kSampleRate {
histogram[input[i]]++
}
histogram_total = (input_size + kSampleRate - 1) / kSampleRate
for i = 0; i < 256; i++ {
/* We add 1 to each population count to avoid 0 bit depths (since this is
only a sample and we don't know if the symbol appears or not), and we
weigh the first 11 samples with weight 3 to account for the balancing
effect of the LZ77 phase on the histogram (more frequent symbols are
more likely to be in backward references instead as literals). */
var adjust uint32 = 1 + 2*brotli_min_uint32_t(histogram[i], 11)
histogram[i] += adjust
histogram_total += uint(adjust)
}
}
buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
8, depths, bits, storage_ix, storage)
{
var literal_ratio uint = 0
for i = 0; i < 256; i++ {
if histogram[i] != 0 {
literal_ratio += uint(histogram[i] * uint32(depths[i]))
}
}
/* Estimated encoding ratio, millibytes per symbol. */
return (literal_ratio * 125) / histogram_total
}
}
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
var tree [129]huffmanTree
var cmd_depth = [numCommandSymbols]byte{0}
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
var cmd_bits [64]uint16
createHuffmanTree(histogram, 64, 15, tree[:], depth)
createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:])
/* We have to jump through a few hoops here in order to compute
the command bits because the symbols are in a different order than in
the full alphabet. This looks complicated, but having the symbols
in this order in the command bits saves a few branches in the Emit*
functions. */
copy(cmd_depth[:], depth[:24])
copy(cmd_depth[24:][:], depth[40:][:8])
copy(cmd_depth[32:][:], depth[24:][:8])
copy(cmd_depth[40:][:], depth[48:][:8])
copy(cmd_depth[48:][:], depth[32:][:8])
copy(cmd_depth[56:][:], depth[56:][:8])
convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:])
copy(bits, cmd_bits[:24])
copy(bits[24:], cmd_bits[32:][:8])
copy(bits[32:], cmd_bits[48:][:8])
copy(bits[40:], cmd_bits[24:][:8])
copy(bits[48:], cmd_bits[40:][:8])
copy(bits[56:], cmd_bits[56:][:8])
convertBitDepthsToSymbols(depth[64:], 64, bits[64:])
{
/* Create the bit length array for the full command alphabet. */
var i uint
for i := 0; i < int(64); i++ {
cmd_depth[i] = 0
} /* only 64 first values were used */
copy(cmd_depth[:], depth[:8])
copy(cmd_depth[64:][:], depth[8:][:8])
copy(cmd_depth[128:][:], depth[16:][:8])
copy(cmd_depth[192:][:], depth[24:][:8])
copy(cmd_depth[384:][:], depth[32:][:8])
for i = 0; i < 8; i++ {
cmd_depth[128+8*i] = depth[40+i]
cmd_depth[256+8*i] = depth[48+i]
cmd_depth[448+8*i] = depth[56+i]
}
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
}
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
}
/* REQUIRES: insertlen < 6210 */
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
if insertlen < 6 {
var code uint = insertlen + 40
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
histo[code]++
} else if insertlen < 130 {
var tail uint = insertlen - 2
var nbits uint32 = log2FloorNonZero(tail) - 1
var prefix uint = tail >> nbits
var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage)
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
histo[inscode]++
} else if insertlen < 2114 {
var tail uint = insertlen - 66
var nbits uint32 = log2FloorNonZero(tail)
var code uint = uint(nbits + 50)
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
histo[code]++
} else {
writeBits(uint(depth[61]), uint64(bits[61]), storage_ix, storage)
writeBits(12, uint64(insertlen)-2114, storage_ix, storage)
histo[61]++
}
}
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
if insertlen < 22594 {
writeBits(uint(depth[62]), uint64(bits[62]), storage_ix, storage)
writeBits(14, uint64(insertlen)-6210, storage_ix, storage)
histo[62]++
} else {
writeBits(uint(depth[63]), uint64(bits[63]), storage_ix, storage)
writeBits(24, uint64(insertlen)-22594, storage_ix, storage)
histo[63]++
}
}
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
if copylen < 10 {
writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]), storage_ix, storage)
histo[copylen+14]++
} else if copylen < 134 {
var tail uint = copylen - 6
var nbits uint32 = log2FloorNonZero(tail) - 1
var prefix uint = tail >> nbits
var code uint = uint((nbits << 1) + uint32(prefix) + 20)
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
histo[code]++
} else if copylen < 2118 {
var tail uint = copylen - 70
var nbits uint32 = log2FloorNonZero(tail)
var code uint = uint(nbits + 28)
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
histo[code]++
} else {
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
writeBits(24, uint64(copylen)-2118, storage_ix, storage)
histo[39]++
}
}
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
if copylen < 12 {
writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]), storage_ix, storage)
histo[copylen-4]++
} else if copylen < 72 {
var tail uint = copylen - 8
var nbits uint32 = log2FloorNonZero(tail) - 1
var prefix uint = tail >> nbits
var code uint = uint((nbits << 1) + uint32(prefix) + 4)
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
histo[code]++
} else if copylen < 136 {
var tail uint = copylen - 8
var code uint = (tail >> 5) + 30
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
writeBits(5, uint64(tail)&31, storage_ix, storage)
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
histo[code]++
histo[64]++
} else if copylen < 2120 {
var tail uint = copylen - 72
var nbits uint32 = log2FloorNonZero(tail)
var code uint = uint(nbits + 28)
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
histo[code]++
histo[64]++
} else {
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
writeBits(24, uint64(copylen)-2120, storage_ix, storage)
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
histo[39]++
histo[64]++
}
}
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
var d uint = distance + 3
var nbits uint32 = log2FloorNonZero(d) - 1
var prefix uint = (d >> nbits) & 1
var offset uint = (2 + prefix) << nbits
var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage)
writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage)
histo[distcode]++
}
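/* For example, distance = 1 gives d = 4, nbits = 1, prefix = 0, offset = 4 and
distcode = 80, with d - offset = 0 emitted in one extra bit; the distance
symbols occupy positions 80..127 of the depth/bits/histo arrays used here. */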
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
var j uint
for j = 0; j < len; j++ {
var lit byte = input[j]
writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage)
}
}
/* REQUIRES: len <= 1 << 24. */
func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
var nibbles uint = 6
/* ISLAST */
writeBits(1, 0, storage_ix, storage)
if len <= 1<<16 {
nibbles = 4
} else if len <= 1<<20 {
nibbles = 5
}
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
/* ISUNCOMPRESSED */
writeSingleBit(is_uncompressed, storage_ix, storage)
}
func updateBits(n_bits uint, bits uint32, pos uint, array []byte) {
for n_bits > 0 {
var byte_pos uint = pos >> 3
var n_unchanged_bits uint = pos & 7
var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
var total_bits uint = n_unchanged_bits + n_changed_bits
var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
var unchanged_bits uint32 = uint32(array[byte_pos]) & mask
var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
n_bits -= n_changed_bits
bits >>= n_changed_bits
pos += n_changed_bits
}
}
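/* updateBits patches bits that were already written to storage: it is used
below to update the MLEN field of the current meta-block header (saved in
mlen_storage_ix) when the block is extended, rewriting the 20-bit length in
place. Each loop iteration fixes up to one byte, masking off the bits that
must stay unchanged. */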
func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) {
var bitpos uint = new_storage_ix & 7
var mask uint = (1 << bitpos) - 1
storage[new_storage_ix>>3] &= byte(mask)
*storage_ix = new_storage_ix
}
var shouldMergeBlock_kSampleRate uint = 43
func shouldMergeBlock(data []byte, len uint, depths []byte) bool {
var histo = [256]uint{0}
var i uint
for i = 0; i < len; i += shouldMergeBlock_kSampleRate {
histo[data[i]]++
}
{
var total uint = (len + shouldMergeBlock_kSampleRate - 1) / shouldMergeBlock_kSampleRate
var r float64 = (fastLog2(total)+0.5)*float64(total) + 200
for i = 0; i < 256; i++ {
r -= float64(histo[i]) * (float64(depths[i]) + fastLog2(histo[i]))
}
return r >= 0.0
}
}
func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertlen uint, literal_ratio uint) bool {
var compressed uint = uint(-cap(next_emit) + cap(metablock_start))
if compressed*50 > insertlen {
return false
} else {
return literal_ratio > 980
}
}
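/* literal_ratio comes from buildAndStoreLiteralPrefixCode and is measured in
millibytes per literal, so literal_ratio > 980 means the estimated cost is
above 0.98 bytes per input byte, i.e. the literals are essentially
incompressible. The first check requires the pending literal run to be at
least 50 times the input already covered by this meta-block before switching
to an uncompressed meta-block. */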
func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) {
var len uint = uint(-cap(end) + cap(begin))
rewindBitPosition1(storage_ix_start, storage_ix, storage)
storeMetaBlockHeader1(uint(len), true, storage_ix, storage)
*storage_ix = (*storage_ix + 7) &^ 7
copy(storage[*storage_ix>>3:], begin[:len])
*storage_ix += uint(len << 3)
storage[*storage_ix>>3] = 0
}
var kCmdHistoSeed = [128]uint32{
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
}
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
var cmd_histo [128]uint32
var ip_end int
var next_emit int = 0
var base_ip int = 0
var input int = 0
const kInputMarginBytes uint = windowGap
const kMinMatchLen uint = 5
var metablock_start int = input
var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
var total_block_size uint = block_size
var mlen_storage_ix uint = *storage_ix + 3
var lit_depth [256]byte
var lit_bits [256]uint16
var literal_ratio uint
var ip int
var last_distance int
var shift uint = 64 - table_bits
/* "next_emit" is a pointer to the first byte that is not covered by a
previous copy. Bytes between "next_emit" and the start of the next copy or
the end of the input will be emitted as literal bytes. */
/* Save the start of the first block for position and distance computations.
*/
/* Save the bit position of the MLEN field of the meta-block header, so that
we can update it later if we decide to extend this meta-block. */
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
/* No block splits, no contexts. */
writeBits(13, 0, storage_ix, storage)
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
{
/* Store the pre-compressed command and distance prefix codes. */
var i uint
for i = 0; i+7 < *cmd_code_numbits; i += 8 {
writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage)
}
}
writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage)
/* Initialize the command and distance histograms. We will gather
statistics of command and distance codes during the processing
of this block and use it to update the command and distance
prefix codes for the next block. */
emit_commands:
copy(cmd_histo[:], kCmdHistoSeed[:])
/* "ip" is the input pointer. */
ip = input
last_distance = -1
ip_end = int(uint(input) + block_size)
if block_size >= kInputMarginBytes {
var len_limit uint = brotli_min_size_t(block_size-kMinMatchLen, input_size-kInputMarginBytes)
var ip_limit int = int(uint(input) + len_limit)
/* For the last block, we need to keep a 16 bytes margin so that we can be
sure that all distances are at most window size - 16.
For all other blocks, we only need to keep a margin of 5 bytes so that
we don't go over the block size with a copy. */
var next_hash uint32
ip++
for next_hash = hash5(in[ip:], shift); ; {
var skip uint32 = 32
var next_ip int = ip
/* Step 1: Scan forward in the input looking for a 5-byte-long match.
If we get close to exhausting the input then goto emit_remainder.
Heuristic match skipping: If 32 bytes are scanned with no matches
found, start looking only at every other byte. If 32 more bytes are
scanned, look at every third byte, etc.. When a match is found,
immediately go back to looking at every byte. This is a small loss
(~5% performance, ~0.1% density) for compressible data due to more
bookkeeping, but for non-compressible data (such as JPEG) it's a huge
win since the compressor quickly "realizes" the data is incompressible
and doesn't bother looking for matches everywhere.
The "skip" variable keeps track of how many bytes there are since the
last match; dividing it by 32 (i.e. right-shifting by five) gives the
number of bytes to move ahead for each iteration. */
var candidate int
assert(next_emit < ip)
trawl:
for {
var hash uint32 = next_hash
var bytes_between_hash_lookups uint32 = skip >> 5
skip++
assert(hash == hash5(in[next_ip:], shift))
ip = next_ip
next_ip = int(uint32(ip) + bytes_between_hash_lookups)
if next_ip > ip_limit {
goto emit_remainder
}
next_hash = hash5(in[next_ip:], shift)
candidate = ip - last_distance
if isMatch5(in[ip:], in[candidate:]) {
if candidate < ip {
table[hash] = int(ip - base_ip)
break
}
}
candidate = base_ip + table[hash]
assert(candidate >= base_ip)
assert(candidate < ip)
table[hash] = int(ip - base_ip)
if isMatch5(in[ip:], in[candidate:]) {
break
}
}
/* Check copy distance. If candidate is not feasible, continue search.
Checking is done outside of hot loop to reduce overhead. */
if ip-candidate > maxDistance_compress_fragment {
goto trawl
}
/* Step 2: Emit the found match together with the literal bytes from
"next_emit" to the bit stream, and then see if we can find a next match
immediately afterwards. Repeat until we find no match for the input
without emitting some literal bytes. */
{
var base int = ip
/* > 0 */
var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5)
var distance int = int(base - candidate)
/* We have a 5-byte match at ip, and we need to emit bytes in
[next_emit, ip). */
var insert uint = uint(base - next_emit)
ip += int(matched)
if insert < 6210 {
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage)
input_size -= uint(base - input)
input = base
next_emit = input
goto next_block
} else {
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
}
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
if distance == last_distance {
writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage)
cmd_histo[64]++
} else {
emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
last_distance = distance
}
emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
next_emit = ip
if ip >= ip_limit {
goto emit_remainder
}
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some positions
within the last copy. */
{
var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:])
var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift)
var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift)
table[prev_hash] = int(ip - base_ip - 3)
prev_hash = hashBytesAtOffset5(input_bytes, 1, shift)
table[prev_hash] = int(ip - base_ip - 2)
prev_hash = hashBytesAtOffset5(input_bytes, 2, shift)
table[prev_hash] = int(ip - base_ip - 1)
candidate = base_ip + table[cur_hash]
table[cur_hash] = int(ip - base_ip)
}
}
for isMatch5(in[ip:], in[candidate:]) {
var base int = ip
/* We have a 5-byte match at ip, and no need to emit any literal bytes
prior to ip. */
var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5)
if ip-candidate > maxDistance_compress_fragment {
break
}
ip += int(matched)
last_distance = int(base - candidate) /* > 0 */
emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
next_emit = ip
if ip >= ip_limit {
goto emit_remainder
}
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some positions
within the last copy. */
{
var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:])
var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift)
var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift)
table[prev_hash] = int(ip - base_ip - 3)
prev_hash = hashBytesAtOffset5(input_bytes, 1, shift)
table[prev_hash] = int(ip - base_ip - 2)
prev_hash = hashBytesAtOffset5(input_bytes, 2, shift)
table[prev_hash] = int(ip - base_ip - 1)
candidate = base_ip + table[cur_hash]
table[cur_hash] = int(ip - base_ip)
}
}
ip++
next_hash = hash5(in[ip:], shift)
}
}
emit_remainder:
assert(next_emit <= ip_end)
input += int(block_size)
input_size -= block_size
block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kMergeBlockSize)
/* Decide if we want to continue this meta-block instead of emitting the
last insert-only command. */
if input_size > 0 && total_block_size+block_size <= 1<<20 && shouldMergeBlock(in[input:], block_size, lit_depth[:]) {
assert(total_block_size > 1<<16)
/* Update the size of the current meta-block and continue emitting commands.
We can do this because the current size and the new size both have 5
nibbles. */
total_block_size += block_size
updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage)
goto emit_commands
}
/* Emit the remaining bytes as literals. */
if next_emit < ip_end {
var insert uint = uint(ip_end - next_emit)
if insert < 6210 {
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage)
} else {
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
}
}
next_emit = ip_end
/* If we have more data, write a new meta-block header and prefix codes and
then continue emitting commands. */
next_block:
if input_size > 0 {
metablock_start = input
block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
total_block_size = block_size
/* Save the bit position of the MLEN field of the meta-block header, so that
we can update it later if we decide to extend this meta-block. */
mlen_storage_ix = *storage_ix + 3
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
/* No block splits, no contexts. */
writeBits(13, 0, storage_ix, storage)
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage)
goto emit_commands
}
if !is_last {
/* If this is not the last block, update the command and distance prefix
codes for the next block and store the compressed forms. */
cmd_code[0] = 0
*cmd_code_numbits = 0
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code)
}
}
/* Compresses "input" string to the "*storage" buffer as one or more complete
meta-blocks, and updates the "*storage_ix" bit position.
If "is_last" is 1, emits an additional empty last meta-block.
"cmd_depth" and "cmd_bits" contain the command and distance prefix codes
(see comment in encode.h) used for the encoding of this input fragment.
If "is_last" is 0, they are updated to reflect the statistics
of this input fragment, to be used for the encoding of the next fragment.
"*cmd_code_numbits" is the number of bits of the compressed representation
of the command and distance prefix codes, and "cmd_code" is an array of
at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed
command and distance prefix codes. If "is_last" is 0, these are also
updated to represent the updated "cmd_depth" and "cmd_bits".
REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
var initial_storage_ix uint = *storage_ix
var table_bits uint = uint(log2FloorNonZero(table_size))
if input_size == 0 {
assert(is_last)
writeBits(1, 1, storage_ix, storage) /* islast */
writeBits(1, 1, storage_ix, storage) /* isempty */
*storage_ix = (*storage_ix + 7) &^ 7
return
}
compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage)
/* If output is larger than single uncompressed block, rewrite it. */
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage)
}
if is_last {
writeBits(1, 1, storage_ix, storage) /* islast */
writeBits(1, 1, storage_ix, storage) /* isempty */
*storage_ix = (*storage_ix + 7) &^ 7
}
}
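/* A rough usage sketch (illustrative only; buffer sizes follow the encoder
state kept in this package, not a public API):

var tableBits uint = 9                    // exponent must be odd: 9, 11, 13 or 15
table := make([]int, 1<<tableBits)        // zero-initialized, as required
var cmdDepth [128]byte                    // command + distance code depths
var cmdBits [128]uint16
var cmdCode [512]byte                     // compressed form of the prefix codes
var cmdCodeNumbits uint
storage := make([]byte, 2*len(input)+503) // ample output bound for this sketch
var storageIx uint
compressFragmentFast(input, uint(len(input)), true, table, uint(len(table)),
    cmdDepth[:], cmdBits[:], &cmdCodeNumbits, cmdCode[:], &storageIx, storage)

When is_last is false, cmdDepth/cmdBits and cmdCode/cmdCodeNumbits carry the
statistics of this fragment over to the next call. */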

748
vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go generated vendored Normal file

@ -0,0 +1,748 @@
package brotli
import "encoding/binary"
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function for fast encoding of an input fragment, independently from the input
history. This function uses two-pass processing: in the first pass we save
the found backward matches and literal bytes into a buffer, and in the
second pass we emit them into the bit stream using prefix codes built based
on the actual command and literal byte histograms. */
const kCompressFragmentTwoPassBlockSize uint = 1 << 17
func hash1(p []byte, shift uint, length uint) uint32 {
var h uint64 = (binary.LittleEndian.Uint64(p) << ((8 - length) * 8)) * uint64(kHashMul32)
return uint32(h >> shift)
}
func hashBytesAtOffset(v uint64, offset uint, shift uint, length uint) uint32 {
assert(offset <= 8-length)
{
var h uint64 = ((v >> (8 * offset)) << ((8 - length) * 8)) * uint64(kHashMul32)
return uint32(h >> shift)
}
}
func isMatch1(p1 []byte, p2 []byte, length uint) bool {
if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) {
return false
}
if length == 4 {
return true
}
return p1[4] == p2[4] && p1[5] == p2[5]
}
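/* The two-pass variant parameterizes the match length: min_match is 4 or 6.
hash1 shifts the 8-byte load by (8 - length) * 8 so that only the low
length bytes contribute to the hash, and isMatch1 always compares the first
four bytes, adding bytes 4 and 5 when length is 6. */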
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
var tree [129]huffmanTree
var cmd_depth = [numCommandSymbols]byte{0}
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
var cmd_bits [64]uint16
createHuffmanTree(histogram, 64, 15, tree[:], depth)
createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:])
/* We have to jump through a few hoops here in order to compute
the command bits because the symbols are in a different order than in
the full alphabet. This looks complicated, but having the symbols
in this order in the command bits saves a few branches in the Emit*
functions. */
copy(cmd_depth[:], depth[24:][:24])
copy(cmd_depth[24:][:], depth[:8])
copy(cmd_depth[32:][:], depth[48:][:8])
copy(cmd_depth[40:][:], depth[8:][:8])
copy(cmd_depth[48:][:], depth[56:][:8])
copy(cmd_depth[56:][:], depth[16:][:8])
convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:])
copy(bits, cmd_bits[24:][:8])
copy(bits[8:], cmd_bits[40:][:8])
copy(bits[16:], cmd_bits[56:][:8])
copy(bits[24:], cmd_bits[:24])
copy(bits[48:], cmd_bits[32:][:8])
copy(bits[56:], cmd_bits[48:][:8])
convertBitDepthsToSymbols(depth[64:], 64, bits[64:])
{
/* Create the bit length array for the full command alphabet. */
var i uint
for i := 0; i < int(64); i++ {
cmd_depth[i] = 0
} /* only 64 first values were used */
copy(cmd_depth[:], depth[24:][:8])
copy(cmd_depth[64:][:], depth[32:][:8])
copy(cmd_depth[128:][:], depth[40:][:8])
copy(cmd_depth[192:][:], depth[48:][:8])
copy(cmd_depth[384:][:], depth[56:][:8])
for i = 0; i < 8; i++ {
cmd_depth[128+8*i] = depth[i]
cmd_depth[256+8*i] = depth[8+i]
cmd_depth[448+8*i] = depth[16+i]
}
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
}
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
}
func emitInsertLen(insertlen uint32, commands *[]uint32) {
if insertlen < 6 {
(*commands)[0] = insertlen
} else if insertlen < 130 {
var tail uint32 = insertlen - 2
var nbits uint32 = log2FloorNonZero(uint(tail)) - 1
var prefix uint32 = tail >> nbits
var inscode uint32 = (nbits << 1) + prefix + 2
var extra uint32 = tail - (prefix << nbits)
(*commands)[0] = inscode | extra<<8
} else if insertlen < 2114 {
var tail uint32 = insertlen - 66
var nbits uint32 = log2FloorNonZero(uint(tail))
var code uint32 = nbits + 10
var extra uint32 = tail - (1 << nbits)
(*commands)[0] = code | extra<<8
} else if insertlen < 6210 {
var extra uint32 = insertlen - 2114
(*commands)[0] = 21 | extra<<8
} else if insertlen < 22594 {
var extra uint32 = insertlen - 6210
(*commands)[0] = 22 | extra<<8
} else {
var extra uint32 = insertlen - 22594
(*commands)[0] = 23 | extra<<8
}
*commands = (*commands)[1:]
}
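/* Commands are buffered as packed uint32 words: the low 8 bits hold the code
(always < 128) and the upper 24 bits hold the extra-bits value; storeCommands
later splits them with cmd & 0xFF and cmd >> 8. For example, insertlen = 7000
is stored as 22 | 790<<8, and storeCommands reconstructs the length as
storeCommands_kInsertOffset[22] + 790 = 7000, emitting 790 in
storeCommands_kNumExtraBits[22] = 14 extra bits. */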
func emitCopyLen(copylen uint, commands *[]uint32) {
if copylen < 10 {
(*commands)[0] = uint32(copylen + 38)
} else if copylen < 134 {
var tail uint = copylen - 6
var nbits uint = uint(log2FloorNonZero(tail) - 1)
var prefix uint = tail >> nbits
var code uint = (nbits << 1) + prefix + 44
var extra uint = tail - (prefix << nbits)
(*commands)[0] = uint32(code | extra<<8)
} else if copylen < 2118 {
var tail uint = copylen - 70
var nbits uint = uint(log2FloorNonZero(tail))
var code uint = nbits + 52
var extra uint = tail - (uint(1) << nbits)
(*commands)[0] = uint32(code | extra<<8)
} else {
var extra uint = copylen - 2118
(*commands)[0] = uint32(63 | extra<<8)
}
*commands = (*commands)[1:]
}
func emitCopyLenLastDistance(copylen uint, commands *[]uint32) {
if copylen < 12 {
(*commands)[0] = uint32(copylen + 20)
*commands = (*commands)[1:]
} else if copylen < 72 {
var tail uint = copylen - 8
var nbits uint = uint(log2FloorNonZero(tail) - 1)
var prefix uint = tail >> nbits
var code uint = (nbits << 1) + prefix + 28
var extra uint = tail - (prefix << nbits)
(*commands)[0] = uint32(code | extra<<8)
*commands = (*commands)[1:]
} else if copylen < 136 {
var tail uint = copylen - 8
var code uint = (tail >> 5) + 54
var extra uint = tail & 31
(*commands)[0] = uint32(code | extra<<8)
*commands = (*commands)[1:]
(*commands)[0] = 64
*commands = (*commands)[1:]
} else if copylen < 2120 {
var tail uint = copylen - 72
var nbits uint = uint(log2FloorNonZero(tail))
var code uint = nbits + 52
var extra uint = tail - (uint(1) << nbits)
(*commands)[0] = uint32(code | extra<<8)
*commands = (*commands)[1:]
(*commands)[0] = 64
*commands = (*commands)[1:]
} else {
var extra uint = copylen - 2120
(*commands)[0] = uint32(63 | extra<<8)
*commands = (*commands)[1:]
(*commands)[0] = 64
*commands = (*commands)[1:]
}
}
func emitDistance(distance uint32, commands *[]uint32) {
var d uint32 = distance + 3
var nbits uint32 = log2FloorNonZero(uint(d)) - 1
var prefix uint32 = (d >> nbits) & 1
var offset uint32 = (2 + prefix) << nbits
var distcode uint32 = 2*(nbits-1) + prefix + 80
var extra uint32 = d - offset
(*commands)[0] = distcode | extra<<8
*commands = (*commands)[1:]
}
/* REQUIRES: len <= 1 << 24. */
func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
var nibbles uint = 6
/* ISLAST */
writeBits(1, 0, storage_ix, storage)
if len <= 1<<16 {
nibbles = 4
} else if len <= 1<<20 {
nibbles = 5
}
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
/* ISUNCOMPRESSED */
writeSingleBit(is_uncompressed, storage_ix, storage)
}
func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) {
var ip int = 0
var shift uint = 64 - table_bits
var ip_end int = int(block_size)
var base_ip int = -cap(base_ip_ptr) + cap(input)
var next_emit int = 0
var last_distance int = -1
/* "ip" is the input pointer. */
const kInputMarginBytes uint = windowGap
/* "next_emit" is a pointer to the first byte that is not covered by a
previous copy. Bytes between "next_emit" and the start of the next copy or
the end of the input will be emitted as literal bytes. */
if block_size >= kInputMarginBytes {
var len_limit uint = brotli_min_size_t(block_size-min_match, input_size-kInputMarginBytes)
var ip_limit int = int(len_limit)
/* For the last block, we need to keep a 16 bytes margin so that we can be
sure that all distances are at most window size - 16.
For all other blocks, we only need to keep a margin of 5 bytes so that
we don't go over the block size with a copy. */
var next_hash uint32
ip++
for next_hash = hash1(input[ip:], shift, min_match); ; {
var skip uint32 = 32
var next_ip int = ip
/* Step 1: Scan forward in the input looking for a 6-byte-long match.
If we get close to exhausting the input then goto emit_remainder.
Heuristic match skipping: If 32 bytes are scanned with no matches
found, start looking only at every other byte. If 32 more bytes are
scanned, look at every third byte, etc.. When a match is found,
immediately go back to looking at every byte. This is a small loss
(~5% performance, ~0.1% density) for compressible data due to more
bookkeeping, but for non-compressible data (such as JPEG) it's a huge
win since the compressor quickly "realizes" the data is incompressible
and doesn't bother looking for matches everywhere.
The "skip" variable keeps track of how many bytes there are since the
last match; dividing it by 32 (i.e. right-shifting by five) gives the
number of bytes to move ahead for each iteration. */
var candidate int
assert(next_emit < ip)
trawl:
for {
var hash uint32 = next_hash
var bytes_between_hash_lookups uint32 = skip >> 5
skip++
ip = next_ip
assert(hash == hash1(input[ip:], shift, min_match))
next_ip = int(uint32(ip) + bytes_between_hash_lookups)
if next_ip > ip_limit {
goto emit_remainder
}
next_hash = hash1(input[next_ip:], shift, min_match)
candidate = ip - last_distance
if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) {
if candidate < ip {
table[hash] = int(ip - base_ip)
break
}
}
candidate = base_ip + table[hash]
assert(candidate >= base_ip)
assert(candidate < ip)
table[hash] = int(ip - base_ip)
if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) {
break
}
}
/* Check copy distance. If candidate is not feasible, continue search.
Checking is done outside of hot loop to reduce overhead. */
if ip-candidate > maxDistance_compress_fragment {
goto trawl
}
/* Step 2: Emit the found match together with the literal bytes from
"next_emit", and then see if we can find a next match immediately
afterwards. Repeat until we find no match for the input
without emitting some literal bytes. */
{
var base int = ip
/* > 0 */
var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match)
var distance int = int(base - candidate)
/* We have a 6-byte match at ip, and we need to emit bytes in
[next_emit, ip). */
var insert int = int(base - next_emit)
ip += int(matched)
emitInsertLen(uint32(insert), commands)
copy(*literals, input[next_emit:][:uint(insert)])
*literals = (*literals)[insert:]
if distance == last_distance {
(*commands)[0] = 64
*commands = (*commands)[1:]
} else {
emitDistance(uint32(distance), commands)
last_distance = distance
}
emitCopyLenLastDistance(matched, commands)
next_emit = ip
if ip >= ip_limit {
goto emit_remainder
}
{
var input_bytes uint64
var cur_hash uint32
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some
positions within the last copy. */
var prev_hash uint32
if min_match == 4 {
input_bytes = binary.LittleEndian.Uint64(input[ip-3:])
cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match)
prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
table[prev_hash] = int(ip - base_ip - 3)
prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
table[prev_hash] = int(ip - base_ip - 2)
prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
table[prev_hash] = int(ip - base_ip - 1)
} else {
input_bytes = binary.LittleEndian.Uint64(input[ip-5:])
prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
table[prev_hash] = int(ip - base_ip - 5)
prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
table[prev_hash] = int(ip - base_ip - 4)
prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
table[prev_hash] = int(ip - base_ip - 3)
input_bytes = binary.LittleEndian.Uint64(input[ip-2:])
cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
table[prev_hash] = int(ip - base_ip - 2)
prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
table[prev_hash] = int(ip - base_ip - 1)
}
candidate = base_ip + table[cur_hash]
table[cur_hash] = int(ip - base_ip)
}
}
for ip-candidate <= maxDistance_compress_fragment && isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) {
var base int = ip
/* We have a 6-byte match at ip, and no need to emit any
literal bytes prior to ip. */
var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match)
ip += int(matched)
last_distance = int(base - candidate) /* > 0 */
emitCopyLen(matched, commands)
emitDistance(uint32(last_distance), commands)
next_emit = ip
if ip >= ip_limit {
goto emit_remainder
}
{
var input_bytes uint64
var cur_hash uint32
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some
positions within the last copy. */
var prev_hash uint32
if min_match == 4 {
input_bytes = binary.LittleEndian.Uint64(input[ip-3:])
cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match)
prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
table[prev_hash] = int(ip - base_ip - 3)
prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
table[prev_hash] = int(ip - base_ip - 2)
prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
table[prev_hash] = int(ip - base_ip - 1)
} else {
input_bytes = binary.LittleEndian.Uint64(input[ip-5:])
prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
table[prev_hash] = int(ip - base_ip - 5)
prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
table[prev_hash] = int(ip - base_ip - 4)
prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
table[prev_hash] = int(ip - base_ip - 3)
input_bytes = binary.LittleEndian.Uint64(input[ip-2:])
cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
table[prev_hash] = int(ip - base_ip - 2)
prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
table[prev_hash] = int(ip - base_ip - 1)
}
candidate = base_ip + table[cur_hash]
table[cur_hash] = int(ip - base_ip)
}
}
ip++
next_hash = hash1(input[ip:], shift, min_match)
}
}
emit_remainder:
assert(next_emit <= ip_end)
/* Emit the remaining bytes as literals. */
if next_emit < ip_end {
var insert uint32 = uint32(ip_end - next_emit)
emitInsertLen(insert, commands)
copy(*literals, input[next_emit:][:insert])
*literals = (*literals)[insert:]
}
}
var storeCommands_kNumExtraBits = [128]uint32{
0,
0,
0,
0,
0,
0,
1,
1,
2,
2,
3,
3,
4,
4,
5,
5,
6,
7,
8,
9,
10,
12,
14,
24,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
2,
2,
3,
3,
4,
4,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
2,
2,
3,
3,
4,
4,
5,
5,
6,
7,
8,
9,
10,
24,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
2,
2,
3,
3,
4,
4,
5,
5,
6,
6,
7,
7,
8,
8,
9,
9,
10,
10,
11,
11,
12,
12,
13,
13,
14,
14,
15,
15,
16,
16,
17,
17,
18,
18,
19,
19,
20,
20,
21,
21,
22,
22,
23,
23,
24,
24,
}
var storeCommands_kInsertOffset = [24]uint32{
0,
1,
2,
3,
4,
5,
6,
8,
10,
14,
18,
26,
34,
50,
66,
98,
130,
194,
322,
578,
1090,
2114,
6210,
22594,
}
func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) {
var lit_depths [256]byte
var lit_bits [256]uint16
var lit_histo = [256]uint32{0}
var cmd_depths = [128]byte{0}
var cmd_bits = [128]uint16{0}
var cmd_histo = [128]uint32{0}
var i uint
for i = 0; i < num_literals; i++ {
lit_histo[literals[i]]++
}
buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */
8, lit_depths[:], lit_bits[:], storage_ix, storage)
for i = 0; i < num_commands; i++ {
var code uint32 = commands[i] & 0xFF
assert(code < 128)
cmd_histo[code]++
}
cmd_histo[1] += 1
cmd_histo[2] += 1
cmd_histo[64] += 1
cmd_histo[84] += 1
buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage)
for i = 0; i < num_commands; i++ {
var cmd uint32 = commands[i]
var code uint32 = cmd & 0xFF
var extra uint32 = cmd >> 8
assert(code < 128)
writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage)
writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage)
if code < 24 {
var insert uint32 = storeCommands_kInsertOffset[code] + extra
var j uint32
for j = 0; j < insert; j++ {
var lit byte = literals[0]
writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage)
literals = literals[1:]
}
}
}
}
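For orientation, each entry in the command buffer is a packed uint32: the low 8 bits select one of the 128 command prefix codes and the upper bits carry that code's extra-bit value, which is exactly how the loop above unpacks it. A minimal sketch of the same unpacking, assuming it sits in this package (the helper name is hypothetical):

// decodeCommandWord mirrors the unpacking in storeCommands: the low byte is
// the prefix-code index, the rest are its extra bits. Codes 0..23 are insert
// commands, whose literal count is storeCommands_kInsertOffset[code] + extra.
func decodeCommandWord(cmd uint32) (code, extra, insertLen uint32, isInsert bool) {
	code = cmd & 0xFF
	extra = cmd >> 8
	if code < 24 {
		return code, extra, storeCommands_kInsertOffset[code] + extra, true
	}
	return code, extra, 0, false
}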
/* Acceptable loss for uncompressible speedup is 2% */
const minRatio = 0.98
const sampleRate = 43
func shouldCompress(input []byte, input_size uint, num_literals uint) bool {
var corpus_size float64 = float64(input_size)
if float64(num_literals) < minRatio*corpus_size {
return true
} else {
var literal_histo = [256]uint32{0}
var max_total_bit_cost float64 = corpus_size * 8 * minRatio / sampleRate
var i uint
for i = 0; i < input_size; i += sampleRate {
literal_histo[input[i]]++
}
return bitsEntropy(literal_histo[:], 256) < max_total_bit_cost
}
}
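A quick worked example of the fast path above, as an in-package sketch (function name hypothetical): a 64 KiB block that produced 60000 literals satisfies 60000 < 0.98 * 65536, so it is coded normally; only blocks where almost every byte survived as a literal fall through to the sampled-entropy test, which looks at one byte in 43 and demands roughly less than 0.98 * 8 bits per sampled byte.

// exampleShouldCompress returns true: 60000 literals out of 65536 input
// bytes is below the 0.98 ratio, so the entropy sampling is never reached.
func exampleShouldCompress() bool {
	input := make([]byte, 65536)
	return shouldCompress(input, 65536, 60000)
}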
func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) {
var bitpos uint = new_storage_ix & 7
var mask uint = (1 << bitpos) - 1
storage[new_storage_ix>>3] &= byte(mask)
*storage_ix = new_storage_ix
}
func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) {
storeMetaBlockHeader(input_size, true, storage_ix, storage)
*storage_ix = (*storage_ix + 7) &^ 7
copy(storage[*storage_ix>>3:], input[:input_size])
*storage_ix += input_size << 3
storage[*storage_ix>>3] = 0
}
func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) {
/* Save the start of the first block for position and distance computations.
*/
var base_ip []byte = input
for input_size > 0 {
var block_size uint = brotli_min_size_t(input_size, kCompressFragmentTwoPassBlockSize)
var commands []uint32 = command_buf
var literals []byte = literal_buf
var num_literals uint
createCommands(input, block_size, input_size, base_ip, table, table_bits, min_match, &literals, &commands)
num_literals = uint(-cap(literals) + cap(literal_buf))
if shouldCompress(input, block_size, num_literals) {
var num_commands uint = uint(-cap(commands) + cap(command_buf))
storeMetaBlockHeader(block_size, false, storage_ix, storage)
/* No block splits, no contexts. */
writeBits(13, 0, storage_ix, storage)
storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage)
} else {
/* Since we did not find many backward references and the entropy of
the data is close to 8 bits, we can simply emit an uncompressed block.
This makes compression speed of uncompressible data about 3x faster. */
emitUncompressedMetaBlock(input, block_size, storage_ix, storage)
}
input = input[block_size:]
input_size -= block_size
}
}
/* Compresses "input" string to the "*storage" buffer as one or more complete
meta-blocks, and updates the "*storage_ix" bit position.
If "is_last" is 1, emits an additional empty last meta-block.
REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
REQUIRES: "command_buf" and "literal_buf" point to at least
kCompressFragmentTwoPassBlockSize long arrays.
REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
REQUIRES: "table_size" is a power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) {
var initial_storage_ix uint = *storage_ix
var table_bits uint = uint(log2FloorNonZero(table_size))
var min_match uint
if table_bits <= 15 {
min_match = 4
} else {
min_match = 6
}
compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage)
/* If output is larger than single uncompressed block, rewrite it. */
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
rewindBitPosition(initial_storage_ix, storage_ix, storage)
emitUncompressedMetaBlock(input, input_size, storage_ix, storage)
}
if is_last {
writeBits(1, 1, storage_ix, storage) /* islast */
writeBits(1, 1, storage_ix, storage) /* isempty */
*storage_ix = (*storage_ix + 7) &^ 7
}
}
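A minimal caller sketch, assuming it lives in this package; the buffer sizes follow the REQUIRES comments above, while the output headroom and the table size are assumptions for illustration, not values taken from the real encoder:

// compressOneShot drives the two-pass fragment compressor over one input of
// at most 1<<24 bytes: a zeroed power-of-two position table, command/literal
// scratch buffers of at least one block, and a bit position into storage.
func compressOneShot(input []byte) []byte {
	storage := make([]byte, len(input)+512) // headroom for an uncompressed fallback block (assumption)
	commandBuf := make([]uint32, kCompressFragmentTwoPassBlockSize)
	literalBuf := make([]byte, kCompressFragmentTwoPassBlockSize)
	table := make([]int, 1<<14) // power of two, all zero => min_match = 4
	var storageIx uint
	compressFragmentTwoPass(input, uint(len(input)), true, commandBuf, literalBuf, table, uint(len(table)), &storageIx, storage)
	return storage[:(storageIx+7)>>3]
}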

77
vendor/github.com/andybalholm/brotli/constants.go generated vendored Normal file

@@ -0,0 +1,77 @@
package brotli
/* Copyright 2016 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Specification: 7.3. Encoding of the context map */
const contextMapMaxRle = 16
/* Specification: 2. Compressed representation overview */
const maxNumberOfBlockTypes = 256
/* Specification: 3.3. Alphabet sizes: insert-and-copy length */
const numLiteralSymbols = 256
const numCommandSymbols = 704
const numBlockLenSymbols = 26
const maxContextMapSymbols = (maxNumberOfBlockTypes + contextMapMaxRle)
const maxBlockTypeSymbols = (maxNumberOfBlockTypes + 2)
/* Specification: 3.5. Complex prefix codes */
const repeatPreviousCodeLength = 16
const repeatZeroCodeLength = 17
const codeLengthCodes = (repeatZeroCodeLength + 1)
/* "code length of 8 is repeated" */
const initialRepeatedCodeLength = 8
/* "Large Window Brotli" */
const largeMaxDistanceBits = 62
const largeMinWbits = 10
const largeMaxWbits = 30
/* Specification: 4. Encoding of distances */
const numDistanceShortCodes = 16
const maxNpostfix = 3
const maxNdirect = 120
const maxDistanceBits = 24
func distanceAlphabetSize(NPOSTFIX uint, NDIRECT uint, MAXNBITS uint) uint {
return numDistanceShortCodes + NDIRECT + uint(MAXNBITS<<(NPOSTFIX+1))
}
/* numDistanceSymbols == 1128 */
const numDistanceSymbols = 1128
const maxDistance = 0x3FFFFFC
const maxAllowedDistance = 0x7FFFFFFC
/* 7.1. Context modes and context ID lookup for literals */
/* "context IDs for literals are in the range of 0..63" */
const literalContextBits = 6
/* 7.2. Context ID for distances */
const distanceContextBits = 2
/* 9.1. Format of the Stream Header */
/* Number of slack bytes for window size. Don't confuse
with BROTLI_NUM_DISTANCE_SHORT_CODES. */
const windowGap = 16
func maxBackwardLimit(W uint) uint {
return (uint(1) << W) - windowGap
}
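As a quick sanity check on the constants above: plugging the maxima into distanceAlphabetSize gives 16 + 120 + (62 << 4) = 1128, which is exactly numDistanceSymbols, and a 22-bit window leaves (1 << 22) - 16 = 4194288 bytes of usable backward distance. An in-package sketch (function name hypothetical):

// exampleDistanceConstants returns 1128 (== numDistanceSymbols) and 4194288.
func exampleDistanceConstants() (uint, uint) {
	return distanceAlphabetSize(maxNpostfix, maxNdirect, largeMaxDistanceBits), maxBackwardLimit(22)
}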

2176
vendor/github.com/andybalholm/brotli/context.go generated vendored Normal file

File diff suppressed because it is too large

2586
vendor/github.com/andybalholm/brotli/decode.go generated vendored Normal file

File diff suppressed because it is too large

122890
vendor/github.com/andybalholm/brotli/dictionary.go generated vendored Normal file

File diff suppressed because it is too large

32779
vendor/github.com/andybalholm/brotli/dictionary_hash.go generated vendored Normal file

File diff suppressed because it is too large

1220
vendor/github.com/andybalholm/brotli/encode.go generated vendored Normal file

File diff suppressed because it is too large

22
vendor/github.com/andybalholm/brotli/encoder_dict.go generated vendored Normal file

@@ -0,0 +1,22 @@
package brotli
/* Dictionary data (words and transforms) for 1 possible context */
type encoderDictionary struct {
words *dictionary
cutoffTransformsCount uint32
cutoffTransforms uint64
hash_table []uint16
buckets []uint16
dict_words []dictWord
}
func initEncoderDictionary(dict *encoderDictionary) {
dict.words = getDictionary()
dict.hash_table = kStaticDictionaryHash[:]
dict.buckets = kStaticDictionaryBuckets[:]
dict.dict_words = kStaticDictionaryWords[:]
dict.cutoffTransformsCount = kCutoffTransformsCount
dict.cutoffTransforms = kCutoffTransforms
}

592
vendor/github.com/andybalholm/brotli/entropy_encode.go generated vendored Normal file

@@ -0,0 +1,592 @@
package brotli
import "math"
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Entropy encoding (Huffman) utilities. */
/* A node of a Huffman tree. */
type huffmanTree struct {
total_count_ uint32
index_left_ int16
index_right_or_value_ int16
}
func initHuffmanTree(self *huffmanTree, count uint32, left int16, right int16) {
self.total_count_ = count
self.index_left_ = left
self.index_right_or_value_ = right
}
/* Input size optimized Shell sort. */
type huffmanTreeComparator func(huffmanTree, huffmanTree) bool
var sortHuffmanTreeItems_gaps = []uint{132, 57, 23, 10, 4, 1}
func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeComparator) {
if n < 13 {
/* Insertion sort. */
var i uint
for i = 1; i < n; i++ {
var tmp huffmanTree = items[i]
var k uint = i
var j uint = i - 1
for comparator(tmp, items[j]) {
items[k] = items[j]
k = j
if j == 0 {
break
}
j--
}
items[k] = tmp
}
return
} else {
var g int
if n < 57 {
g = 2
} else {
g = 0
}
for ; g < 6; g++ {
var gap uint = sortHuffmanTreeItems_gaps[g]
var i uint
for i = gap; i < n; i++ {
var j uint = i
var tmp huffmanTree = items[i]
for ; j >= gap && comparator(tmp, items[j-gap]); j -= gap {
items[j] = items[j-gap]
}
items[j] = tmp
}
}
}
}
/* Returns 1 if assignment of depths succeeded, otherwise 0. */
func setDepth(p0 int, pool []huffmanTree, depth []byte, max_depth int) bool {
var stack [16]int
var level int = 0
var p int = p0
assert(max_depth <= 15)
stack[0] = -1
for {
if pool[p].index_left_ >= 0 {
level++
if level > max_depth {
return false
}
stack[level] = int(pool[p].index_right_or_value_)
p = int(pool[p].index_left_)
continue
} else {
depth[pool[p].index_right_or_value_] = byte(level)
}
for level >= 0 && stack[level] == -1 {
level--
}
if level < 0 {
return true
}
p = stack[level]
stack[level] = -1
}
}
/* Sort the root nodes, least popular first. */
func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool {
if v0.total_count_ != v1.total_count_ {
return v0.total_count_ < v1.total_count_
}
return v0.index_right_or_value_ > v1.index_right_or_value_
}
/* This function will create a Huffman tree.
The catch here is that the tree cannot be arbitrarily deep.
Brotli specifies a maximum depth of 15 bits for "code trees"
and 7 bits for "code length code trees."
count_limit is the value that is to be faked as the minimum value
and this minimum value is raised until the tree matches the
maximum length requirement.
This algorithm is not of excellent performance for very long data blocks,
especially when population counts are longer than 2**tree_limit, but
we are not planning to use this with extremely long blocks.
See http://en.wikipedia.org/wiki/Huffman_coding */
func createHuffmanTree(data []uint32, length uint, tree_limit int, tree []huffmanTree, depth []byte) {
var count_limit uint32
var sentinel huffmanTree
initHuffmanTree(&sentinel, math.MaxUint32, -1, -1)
/* For block sizes below 64 kB, we never need to do a second iteration
of this loop. Probably all of our block sizes will be smaller than
that, so this loop is mostly of academic interest. If we actually
would need this, we would be better off with the Katajainen algorithm. */
for count_limit = 1; ; count_limit *= 2 {
var n uint = 0
var i uint
var j uint
var k uint
for i = length; i != 0; {
i--
if data[i] != 0 {
var count uint32 = brotli_max_uint32_t(data[i], count_limit)
initHuffmanTree(&tree[n], count, -1, int16(i))
n++
}
}
if n == 1 {
depth[tree[0].index_right_or_value_] = 1 /* Only one element. */
break
}
sortHuffmanTreeItems(tree, n, huffmanTreeComparator(sortHuffmanTree))
/* The nodes are:
[0, n): the sorted leaf nodes that we start with.
[n]: we add a sentinel here.
[n + 1, 2n): new parent nodes are added here, starting from
(n+1). These are naturally in ascending order.
[2n]: we add a sentinel at the end as well.
There will be (2n+1) elements at the end. */
tree[n] = sentinel
tree[n+1] = sentinel
i = 0 /* Points to the next leaf node. */
j = n + 1 /* Points to the next non-leaf node. */
for k = n - 1; k != 0; k-- {
var left uint
var right uint
if tree[i].total_count_ <= tree[j].total_count_ {
left = i
i++
} else {
left = j
j++
}
if tree[i].total_count_ <= tree[j].total_count_ {
right = i
i++
} else {
right = j
j++
}
{
/* The sentinel node becomes the parent node. */
var j_end uint = 2*n - k
tree[j_end].total_count_ = tree[left].total_count_ + tree[right].total_count_
tree[j_end].index_left_ = int16(left)
tree[j_end].index_right_or_value_ = int16(right)
/* Add back the last sentinel node. */
tree[j_end+1] = sentinel
}
}
if setDepth(int(2*n-1), tree[0:], depth, tree_limit) {
/* We need to pack the Huffman tree in tree_limit bits. If this was not
successful, add fake entities to the lowest values and retry. */
break
}
}
}
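A minimal usage sketch for the two halves of this file, assuming it sits in this package (the helper name is hypothetical): createHuffmanTree fills in depth-limited code lengths, and convertBitDepthsToSymbols, defined further down, turns them into the bit patterns that actually get written.

// buildDepthLimitedCode shows the intended call sequence: histogram ->
// depth-limited depths -> canonical bit patterns. The scratch tree needs the
// "(2n+1) elements" described above, and the histogram must contain at least
// one nonzero count.
func buildDepthLimitedCode(histogram []uint32, maxDepth int) ([]byte, []uint16) {
	n := uint(len(histogram))
	tree := make([]huffmanTree, 2*n+1)
	depths := make([]byte, n)
	bits := make([]uint16, n)
	createHuffmanTree(histogram, n, maxDepth, tree, depths)
	convertBitDepthsToSymbols(depths, n, bits)
	return depths, bits
}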
func reverse(v []byte, start uint, end uint) {
end--
for start < end {
var tmp byte = v[start]
v[start] = v[end]
v[end] = tmp
start++
end--
}
}
func writeHuffmanTreeRepetitions(previous_value byte, value byte, repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
assert(repetitions > 0)
if previous_value != value {
tree[*tree_size] = value
extra_bits_data[*tree_size] = 0
(*tree_size)++
repetitions--
}
if repetitions == 7 {
tree[*tree_size] = value
extra_bits_data[*tree_size] = 0
(*tree_size)++
repetitions--
}
if repetitions < 3 {
var i uint
for i = 0; i < repetitions; i++ {
tree[*tree_size] = value
extra_bits_data[*tree_size] = 0
(*tree_size)++
}
} else {
var start uint = *tree_size
repetitions -= 3
for {
tree[*tree_size] = repeatPreviousCodeLength
extra_bits_data[*tree_size] = byte(repetitions & 0x3)
(*tree_size)++
repetitions >>= 2
if repetitions == 0 {
break
}
repetitions--
}
reverse(tree, start, *tree_size)
reverse(extra_bits_data, start, *tree_size)
}
}
func writeHuffmanTreeRepetitionsZeros(repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
if repetitions == 11 {
tree[*tree_size] = 0
extra_bits_data[*tree_size] = 0
(*tree_size)++
repetitions--
}
if repetitions < 3 {
var i uint
for i = 0; i < repetitions; i++ {
tree[*tree_size] = 0
extra_bits_data[*tree_size] = 0
(*tree_size)++
}
} else {
var start uint = *tree_size
repetitions -= 3
for {
tree[*tree_size] = repeatZeroCodeLength
extra_bits_data[*tree_size] = byte(repetitions & 0x7)
(*tree_size)++
repetitions >>= 3
if repetitions == 0 {
break
}
repetitions--
}
reverse(tree, start, *tree_size)
reverse(extra_bits_data, start, *tree_size)
}
}
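Two quick traces of the zero-run encoder above: a run of 3 zeros becomes a single code 17 (repeatZeroCodeLength) with extra bits 0, and a run of 10 zeros becomes a single code 17 with extra bits 7, i.e. 3 + 7 = 10; longer runs emit several code-17 entries whose extra bits are written out in reverse. An in-package check (function name hypothetical):

// exampleZeroRun encodes a run of 10 zero code lengths; it yields one
// emitted symbol: 17 (repeatZeroCodeLength) with extra bits 7.
func exampleZeroRun() (symbol byte, extra byte) {
	var tree, extraBits [8]byte
	var treeSize uint
	writeHuffmanTreeRepetitionsZeros(10, &treeSize, tree[:], extraBits[:])
	return tree[0], extraBits[0]
}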
/* Change the population counts so that the subsequent Huffman tree
compression, especially its RLE part, is more likely to compress
this data efficiently.
length contains the size of the histogram.
counts contains the population counts.
good_for_rle is a buffer of at least length size */
func optimizeHuffmanCountsForRLE(length uint, counts []uint32, good_for_rle []byte) {
var nonzero_count uint = 0
var stride uint
var limit uint
var sum uint
var streak_limit uint = 1240
var i uint
/* Let's make the Huffman code more compatible with RLE encoding. */
for i = 0; i < length; i++ {
if counts[i] != 0 {
nonzero_count++
}
}
if nonzero_count < 16 {
return
}
for length != 0 && counts[length-1] == 0 {
length--
}
if length == 0 {
return /* All zeros. */
}
/* Now counts[0..length - 1] does not have trailing zeros. */
{
var nonzeros uint = 0
var smallest_nonzero uint32 = 1 << 30
for i = 0; i < length; i++ {
if counts[i] != 0 {
nonzeros++
if smallest_nonzero > counts[i] {
smallest_nonzero = counts[i]
}
}
}
if nonzeros < 5 {
/* Small histogram will model it well. */
return
}
if smallest_nonzero < 4 {
var zeros uint = length - nonzeros
if zeros < 6 {
for i = 1; i < length-1; i++ {
if counts[i-1] != 0 && counts[i] == 0 && counts[i+1] != 0 {
counts[i] = 1
}
}
}
}
if nonzeros < 28 {
return
}
}
/* 2) Let's mark all population counts that already can be encoded
with an RLE code. */
for i := 0; i < int(length); i++ {
good_for_rle[i] = 0
}
{
var symbol uint32 = counts[0]
/* Let's not spoil any of the existing good RLE codes.
Mark any sequence of 0's at least 5 long as good_for_rle.
Mark any sequence of non-0's at least 7 long as good_for_rle. */
var step uint = 0
for i = 0; i <= length; i++ {
if i == length || counts[i] != symbol {
if (symbol == 0 && step >= 5) || (symbol != 0 && step >= 7) {
var k uint
for k = 0; k < step; k++ {
good_for_rle[i-k-1] = 1
}
}
step = 1
if i != length {
symbol = counts[i]
}
} else {
step++
}
}
}
/* 3) Let's replace those population counts that lead to more RLE codes.
Math here is in 24.8 fixed point representation. */
stride = 0
limit = uint(256*(counts[0]+counts[1]+counts[2])/3 + 420)
sum = 0
for i = 0; i <= length; i++ {
if i == length || good_for_rle[i] != 0 || (i != 0 && good_for_rle[i-1] != 0) || (256*counts[i]-uint32(limit)+uint32(streak_limit)) >= uint32(2*streak_limit) {
if stride >= 4 || (stride >= 3 && sum == 0) {
var k uint
var count uint = (sum + stride/2) / stride
/* The stride must end, collapse what we have, if we have enough (4). */
if count == 0 {
count = 1
}
if sum == 0 {
/* Don't make an all zeros stride to be upgraded to ones. */
count = 0
}
for k = 0; k < stride; k++ {
/* We don't want to change value at counts[i],
that is already belonging to the next stride. Thus - 1. */
counts[i-k-1] = uint32(count)
}
}
stride = 0
sum = 0
if i < length-2 {
/* All interesting strides have a count of at least 4, */
/* at least when non-zeros. */
limit = uint(256*(counts[i]+counts[i+1]+counts[i+2])/3 + 420)
} else if i < length {
limit = uint(256 * counts[i])
} else {
limit = 0
}
}
stride++
if i != length {
sum += uint(counts[i])
if stride >= 4 {
limit = (256*sum + stride/2) / stride
}
if stride == 4 {
limit += 120
}
}
}
}
func decideOverRLEUse(depth []byte, length uint, use_rle_for_non_zero *bool, use_rle_for_zero *bool) {
var total_reps_zero uint = 0
var total_reps_non_zero uint = 0
var count_reps_zero uint = 1
var count_reps_non_zero uint = 1
var i uint
for i = 0; i < length; {
var value byte = depth[i]
var reps uint = 1
var k uint
for k = i + 1; k < length && depth[k] == value; k++ {
reps++
}
if reps >= 3 && value == 0 {
total_reps_zero += reps
count_reps_zero++
}
if reps >= 4 && value != 0 {
total_reps_non_zero += reps
count_reps_non_zero++
}
i += reps
}
*use_rle_for_non_zero = total_reps_non_zero > count_reps_non_zero*2
*use_rle_for_zero = total_reps_zero > count_reps_zero*2
}
/* Write a Huffman tree from bit depths into the bit-stream representation
of a Huffman tree. The generated Huffman tree is to be compressed once
more using a Huffman tree */
func writeHuffmanTree(depth []byte, length uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
var previous_value byte = initialRepeatedCodeLength
var i uint
var use_rle_for_non_zero bool = false
var use_rle_for_zero bool = false
var new_length uint = length
/* Throw away trailing zeros. */
for i = 0; i < length; i++ {
if depth[length-i-1] == 0 {
new_length--
} else {
break
}
}
/* First gather statistics on whether it is a good idea to do RLE. */
if length > 50 {
/* Find RLE coding for longer codes.
Shorter codes seem not to benefit from RLE. */
decideOverRLEUse(depth, new_length, &use_rle_for_non_zero, &use_rle_for_zero)
}
/* Actual RLE coding. */
for i = 0; i < new_length; {
var value byte = depth[i]
var reps uint = 1
if (value != 0 && use_rle_for_non_zero) || (value == 0 && use_rle_for_zero) {
var k uint
for k = i + 1; k < new_length && depth[k] == value; k++ {
reps++
}
}
if value == 0 {
writeHuffmanTreeRepetitionsZeros(reps, tree_size, tree, extra_bits_data)
} else {
writeHuffmanTreeRepetitions(previous_value, value, reps, tree_size, tree, extra_bits_data)
previous_value = value
}
i += reps
}
}
var reverseBits_kLut = [16]uint{
0x00,
0x08,
0x04,
0x0C,
0x02,
0x0A,
0x06,
0x0E,
0x01,
0x09,
0x05,
0x0D,
0x03,
0x0B,
0x07,
0x0F,
}
func reverseBits(num_bits uint, bits uint16) uint16 {
var retval uint = reverseBits_kLut[bits&0x0F]
var i uint
for i = 4; i < num_bits; i += 4 {
retval <<= 4
bits = uint16(bits >> 4)
retval |= reverseBits_kLut[bits&0x0F]
}
retval >>= ((0 - num_bits) & 0x03)
return uint16(retval)
}
/* 0..15 are values for bits */
const maxHuffmanBits = 16
/* Get the actual bit values for a tree of bit depths. */
func convertBitDepthsToSymbols(depth []byte, len uint, bits []uint16) {
var bl_count = [maxHuffmanBits]uint16{0}
var next_code [maxHuffmanBits]uint16
var i uint
/* In Brotli, all bit depths are [1..15]
0 bit depth means that the symbol does not exist. */
var code int = 0
for i = 0; i < len; i++ {
bl_count[depth[i]]++
}
bl_count[0] = 0
next_code[0] = 0
for i = 1; i < maxHuffmanBits; i++ {
code = (code + int(bl_count[i-1])) << 1
next_code[i] = uint16(code)
}
for i = 0; i < len; i++ {
if depth[i] != 0 {
bits[i] = reverseBits(uint(depth[i]), next_code[depth[i]])
next_code[depth[i]]++
}
}
}
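A worked example of the canonical construction above: depths [1, 2, 3, 3] give the codes 0, 10, 110 and 111, and after the bit reversal (the prefix codes are emitted low bit first) the stored patterns are 0, 1, 3 and 7. In-package sketch (function name hypothetical):

// exampleCanonicalCode returns [0 1 3 7], the reversed canonical codes for
// bit depths {1, 2, 3, 3}.
func exampleCanonicalCode() []uint16 {
	depths := []byte{1, 2, 3, 3}
	bits := make([]uint16, len(depths))
	convertBitDepthsToSymbols(depths, uint(len(depths)), bits)
	return bits
}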

File diff suppressed because it is too large

290
vendor/github.com/andybalholm/brotli/fast_log.go generated vendored Normal file

@@ -0,0 +1,290 @@
package brotli
import (
"math"
"math/bits"
)
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Utilities for fast computation of logarithms. */
func log2FloorNonZero(n uint) uint32 {
return uint32(bits.Len(n)) - 1
}
/* A lookup table for small values of log2(int) to be used in entropy
computation.
", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]]) */
var kLog2Table = []float32{
0.0000000000000000,
0.0000000000000000,
1.0000000000000000,
1.5849625007211563,
2.0000000000000000,
2.3219280948873622,
2.5849625007211561,
2.8073549220576042,
3.0000000000000000,
3.1699250014423126,
3.3219280948873626,
3.4594316186372978,
3.5849625007211565,
3.7004397181410922,
3.8073549220576037,
3.9068905956085187,
4.0000000000000000,
4.0874628412503400,
4.1699250014423122,
4.2479275134435852,
4.3219280948873626,
4.3923174227787607,
4.4594316186372973,
4.5235619560570131,
4.5849625007211570,
4.6438561897747244,
4.7004397181410926,
4.7548875021634691,
4.8073549220576037,
4.8579809951275728,
4.9068905956085187,
4.9541963103868758,
5.0000000000000000,
5.0443941193584534,
5.0874628412503400,
5.1292830169449664,
5.1699250014423122,
5.2094533656289501,
5.2479275134435852,
5.2854022188622487,
5.3219280948873626,
5.3575520046180838,
5.3923174227787607,
5.4262647547020979,
5.4594316186372973,
5.4918530963296748,
5.5235619560570131,
5.5545888516776376,
5.5849625007211570,
5.6147098441152083,
5.6438561897747244,
5.6724253419714961,
5.7004397181410926,
5.7279204545631996,
5.7548875021634691,
5.7813597135246599,
5.8073549220576046,
5.8328900141647422,
5.8579809951275719,
5.8826430493618416,
5.9068905956085187,
5.9307373375628867,
5.9541963103868758,
5.9772799234999168,
6.0000000000000000,
6.0223678130284544,
6.0443941193584534,
6.0660891904577721,
6.0874628412503400,
6.1085244567781700,
6.1292830169449672,
6.1497471195046822,
6.1699250014423122,
6.1898245588800176,
6.2094533656289510,
6.2288186904958804,
6.2479275134435861,
6.2667865406949019,
6.2854022188622487,
6.3037807481771031,
6.3219280948873617,
6.3398500028846252,
6.3575520046180847,
6.3750394313469254,
6.3923174227787598,
6.4093909361377026,
6.4262647547020979,
6.4429434958487288,
6.4594316186372982,
6.4757334309663976,
6.4918530963296748,
6.5077946401986964,
6.5235619560570131,
6.5391588111080319,
6.5545888516776376,
6.5698556083309478,
6.5849625007211561,
6.5999128421871278,
6.6147098441152092,
6.6293566200796095,
6.6438561897747253,
6.6582114827517955,
6.6724253419714952,
6.6865005271832185,
6.7004397181410917,
6.7142455176661224,
6.7279204545631988,
6.7414669864011465,
6.7548875021634691,
6.7681843247769260,
6.7813597135246599,
6.7944158663501062,
6.8073549220576037,
6.8201789624151887,
6.8328900141647422,
6.8454900509443757,
6.8579809951275719,
6.8703647195834048,
6.8826430493618416,
6.8948177633079437,
6.9068905956085187,
6.9188632372745955,
6.9307373375628867,
6.9425145053392399,
6.9541963103868758,
6.9657842846620879,
6.9772799234999168,
6.9886846867721664,
7.0000000000000000,
7.0112272554232540,
7.0223678130284544,
7.0334230015374501,
7.0443941193584534,
7.0552824355011898,
7.0660891904577721,
7.0768155970508317,
7.0874628412503400,
7.0980320829605272,
7.1085244567781700,
7.1189410727235076,
7.1292830169449664,
7.1395513523987937,
7.1497471195046822,
7.1598713367783891,
7.1699250014423130,
7.1799090900149345,
7.1898245588800176,
7.1996723448363644,
7.2094533656289492,
7.2191685204621621,
7.2288186904958804,
7.2384047393250794,
7.2479275134435861,
7.2573878426926521,
7.2667865406949019,
7.2761244052742384,
7.2854022188622487,
7.2946207488916270,
7.3037807481771031,
7.3128829552843557,
7.3219280948873617,
7.3309168781146177,
7.3398500028846243,
7.3487281542310781,
7.3575520046180847,
7.3663222142458151,
7.3750394313469254,
7.3837042924740528,
7.3923174227787607,
7.4008794362821844,
7.4093909361377026,
7.4178525148858991,
7.4262647547020979,
7.4346282276367255,
7.4429434958487288,
7.4512111118323299,
7.4594316186372973,
7.4676055500829976,
7.4757334309663976,
7.4838157772642564,
7.4918530963296748,
7.4998458870832057,
7.5077946401986964,
7.5156998382840436,
7.5235619560570131,
7.5313814605163119,
7.5391588111080319,
7.5468944598876373,
7.5545888516776376,
7.5622424242210728,
7.5698556083309478,
7.5774288280357487,
7.5849625007211561,
7.5924570372680806,
7.5999128421871278,
7.6073303137496113,
7.6147098441152075,
7.6220518194563764,
7.6293566200796095,
7.6366246205436488,
7.6438561897747244,
7.6510516911789290,
7.6582114827517955,
7.6653359171851765,
7.6724253419714952,
7.6794800995054464,
7.6865005271832185,
7.6934869574993252,
7.7004397181410926,
7.7073591320808825,
7.7142455176661224,
7.7210991887071856,
7.7279204545631996,
7.7347096202258392,
7.7414669864011465,
7.7481928495894596,
7.7548875021634691,
7.7615512324444795,
7.7681843247769260,
7.7747870596011737,
7.7813597135246608,
7.7879025593914317,
7.7944158663501062,
7.8008998999203047,
7.8073549220576037,
7.8137811912170374,
7.8201789624151887,
7.8265484872909159,
7.8328900141647422,
7.8392037880969445,
7.8454900509443757,
7.8517490414160571,
7.8579809951275719,
7.8641861446542798,
7.8703647195834048,
7.8765169465650002,
7.8826430493618425,
7.8887432488982601,
7.8948177633079446,
7.9008668079807496,
7.9068905956085187,
7.9128893362299619,
7.9188632372745955,
7.9248125036057813,
7.9307373375628867,
7.9366379390025719,
7.9425145053392399,
7.9483672315846778,
7.9541963103868758,
7.9600019320680806,
7.9657842846620870,
7.9715435539507720,
7.9772799234999168,
7.9829935746943104,
7.9886846867721664,
7.9943534368588578,
}
/* Faster logarithm for small integers, with the property of log2(0) == 0. */
func fastLog2(v uint) float64 {
if v < uint(len(kLog2Table)) {
return float64(kLog2Table[v])
}
return math.Log2(float64(v))
}
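Both branches in one place, as an in-package sketch (function name hypothetical): values below 256 come straight from kLog2Table, including the deliberate log2(0) == 0 called out in the comment, while larger values fall back to math.Log2.

// exampleFastLog2 returns 0 (table entry for 0) and 10 (math.Log2(1024)).
func exampleFastLog2() (float64, float64) {
	return fastLog2(0), fastLog2(1024)
}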


@@ -0,0 +1,45 @@
package brotli
import (
"encoding/binary"
"math/bits"
"runtime"
)
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function to find maximal matching prefixes of strings. */
func findMatchLengthWithLimit(s1 []byte, s2 []byte, limit uint) uint {
var matched uint = 0
_, _ = s1[limit-1], s2[limit-1] // bounds check
switch runtime.GOARCH {
case "amd64":
// Compare 8 bytes at a time.
for matched+8 <= limit {
w1 := binary.LittleEndian.Uint64(s1[matched:])
w2 := binary.LittleEndian.Uint64(s2[matched:])
if w1 != w2 {
return matched + uint(bits.TrailingZeros64(w1^w2)>>3)
}
matched += 8
}
case "386":
// Compare 4 bytes at a time.
for matched+4 <= limit {
w1 := binary.LittleEndian.Uint32(s1[matched:])
w2 := binary.LittleEndian.Uint32(s2[matched:])
if w1 != w2 {
return matched + uint(bits.TrailingZeros32(w1^w2)>>3)
}
matched += 4
}
}
for matched < limit && s1[matched] == s2[matched] {
matched++
}
return matched
}
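The word-at-a-time paths work because the lowest set bit of w1 ^ w2 lies inside the first mismatching byte, so dividing the trailing-zero count by 8 gives the number of leading bytes that still match. A small in-package check (function name hypothetical):

// exampleFindMatchLength returns 3: "abc" matches before the first
// mismatching byte at index 3, on both the word-wise and byte-wise paths.
func exampleFindMatchLength() uint {
	s1 := []byte("abcdefgh")
	s2 := []byte("abcXefgh")
	return findMatchLengthWithLimit(s1, s2, 8)
}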

5
vendor/github.com/andybalholm/brotli/go.mod generated vendored Normal file

@@ -0,0 +1,5 @@
module github.com/andybalholm/brotli
go 1.12
retract v1.0.1 // occasional panics and data corruption

0
vendor/github.com/andybalholm/brotli/go.sum generated vendored Normal file

287
vendor/github.com/andybalholm/brotli/h10.go generated vendored Normal file

@@ -0,0 +1,287 @@
package brotli
import "encoding/binary"
/* Copyright 2016 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func (*h10) HashTypeLength() uint {
return 4
}
func (*h10) StoreLookahead() uint {
return 128
}
func hashBytesH10(data []byte) uint32 {
var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return h >> (32 - 17)
}
/* A (forgetful) hash table where each hash bucket contains a binary tree of
sequences whose first 4 bytes share the same hash code.
Each sequence is 128 bytes long and is identified by its starting
position in the input data. The binary tree is sorted by the lexicographic
order of the sequences, and it is also a max-heap with respect to the
starting positions. */
type h10 struct {
hasherCommon
window_mask_ uint
buckets_ [1 << 17]uint32
invalid_pos_ uint32
forest []uint32
}
func (h *h10) Initialize(params *encoderParams) {
h.window_mask_ = (1 << params.lgwin) - 1
h.invalid_pos_ = uint32(0 - h.window_mask_)
var num_nodes uint = uint(1) << params.lgwin
h.forest = make([]uint32, 2*num_nodes)
}
func (h *h10) Prepare(one_shot bool, input_size uint, data []byte) {
var invalid_pos uint32 = h.invalid_pos_
var i uint32
for i = 0; i < 1<<17; i++ {
h.buckets_[i] = invalid_pos
}
}
func leftChildIndexH10(self *h10, pos uint) uint {
return 2 * (pos & self.window_mask_)
}
func rightChildIndexH10(self *h10, pos uint) uint {
return 2*(pos&self.window_mask_) + 1
}
/* Stores the hash of the next 4 bytes and in a single tree-traversal, the
hash bucket's binary tree is searched for matches and is re-rooted at the
current position.
If fewer than 128 bytes of data are available, the hash bucket of the
current position is searched for matches, but the state of the hash table
is not changed, since we can not know the final sorting order of the
current (incomplete) sequence.
This function must be called with increasing cur_ix positions. */
func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mask uint, max_length uint, max_backward uint, best_len *uint, matches []backwardMatch) []backwardMatch {
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var max_comp_len uint = brotli_min_size_t(max_length, 128)
var should_reroot_tree bool = (max_length >= 128)
var key uint32 = hashBytesH10(data[cur_ix_masked:])
var forest []uint32 = self.forest
var prev_ix uint = uint(self.buckets_[key])
var node_left uint = leftChildIndexH10(self, cur_ix)
var node_right uint = rightChildIndexH10(self, cur_ix)
var best_len_left uint = 0
var best_len_right uint = 0
var depth_remaining uint
/* The forest index of the rightmost node of the left subtree of the new
root, updated as we traverse and re-root the tree of the hash bucket. */
/* The forest index of the leftmost node of the right subtree of the new
root, updated as we traverse and re-root the tree of the hash bucket. */
/* The match length of the rightmost node of the left subtree of the new
root, updated as we traverse and re-root the tree of the hash bucket. */
/* The match length of the leftmost node of the right subtree of the new
root, updated as we traverse and re-root the tree of the hash bucket. */
if should_reroot_tree {
self.buckets_[key] = uint32(cur_ix)
}
for depth_remaining = 64; ; depth_remaining-- {
var backward uint = cur_ix - prev_ix
var prev_ix_masked uint = prev_ix & ring_buffer_mask
if backward == 0 || backward > max_backward || depth_remaining == 0 {
if should_reroot_tree {
forest[node_left] = self.invalid_pos_
forest[node_right] = self.invalid_pos_
}
break
}
{
var cur_len uint = brotli_min_size_t(best_len_left, best_len_right)
var len uint
assert(cur_len <= 128)
len = cur_len + findMatchLengthWithLimit(data[cur_ix_masked+cur_len:], data[prev_ix_masked+cur_len:], max_length-cur_len)
if matches != nil && len > *best_len {
*best_len = uint(len)
initBackwardMatch(&matches[0], backward, uint(len))
matches = matches[1:]
}
if len >= max_comp_len {
if should_reroot_tree {
forest[node_left] = forest[leftChildIndexH10(self, prev_ix)]
forest[node_right] = forest[rightChildIndexH10(self, prev_ix)]
}
break
}
if data[cur_ix_masked+len] > data[prev_ix_masked+len] {
best_len_left = uint(len)
if should_reroot_tree {
forest[node_left] = uint32(prev_ix)
}
node_left = rightChildIndexH10(self, prev_ix)
prev_ix = uint(forest[node_left])
} else {
best_len_right = uint(len)
if should_reroot_tree {
forest[node_right] = uint32(prev_ix)
}
node_right = leftChildIndexH10(self, prev_ix)
prev_ix = uint(forest[node_right])
}
}
}
return matches
}
/* Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the
length of max_length and stores the position cur_ix in the hash table.
Sets *num_matches to the number of matches found, and stores the found
matches in matches[0] to matches[*num_matches - 1]. The matches will be
sorted by strictly increasing length and (non-strictly) increasing
distance. */
func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, cur_ix uint, max_length uint, max_backward uint, gap uint, params *encoderParams, matches []backwardMatch) uint {
var orig_matches []backwardMatch = matches
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var best_len uint = 1
var short_match_max_backward uint
if params.quality != hqZopflificationQuality {
short_match_max_backward = 16
} else {
short_match_max_backward = 64
}
var stop uint = cur_ix - short_match_max_backward
var dict_matches [maxStaticDictionaryMatchLen + 1]uint32
var i uint
if cur_ix < short_match_max_backward {
stop = 0
}
for i = cur_ix - 1; i > stop && best_len <= 2; i-- {
var prev_ix uint = i
var backward uint = cur_ix - prev_ix
if backward > max_backward {
break
}
prev_ix &= ring_buffer_mask
if data[cur_ix_masked] != data[prev_ix] || data[cur_ix_masked+1] != data[prev_ix+1] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len > best_len {
best_len = uint(len)
initBackwardMatch(&matches[0], backward, uint(len))
matches = matches[1:]
}
}
}
if best_len < max_length {
matches = storeAndFindMatchesH10(handle, data, cur_ix, ring_buffer_mask, max_length, max_backward, &best_len, matches)
}
for i = 0; i <= maxStaticDictionaryMatchLen; i++ {
dict_matches[i] = kInvalidMatch
}
{
var minlen uint = brotli_max_size_t(4, best_len+1)
if findAllStaticDictionaryMatches(dictionary, data[cur_ix_masked:], minlen, max_length, dict_matches[0:]) {
var maxlen uint = brotli_min_size_t(maxStaticDictionaryMatchLen, max_length)
var l uint
for l = minlen; l <= maxlen; l++ {
var dict_id uint32 = dict_matches[l]
if dict_id < kInvalidMatch {
var distance uint = max_backward + gap + uint(dict_id>>5) + 1
if distance <= params.dist.max_distance {
initDictionaryBackwardMatch(&matches[0], distance, l, uint(dict_id&31))
matches = matches[1:]
}
}
}
}
}
return uint(-cap(matches) + cap(orig_matches))
}
/* Stores the hash of the next 4 bytes and re-roots the binary tree at the
current sequence, without returning any matches.
REQUIRES: ix + 128 <= end-of-current-block */
func (h *h10) Store(data []byte, mask uint, ix uint) {
var max_backward uint = h.window_mask_ - windowGap + 1
/* Maximum distance is window size - 16, see section 9.1. of the spec. */
storeAndFindMatchesH10(h, data, ix, mask, 128, max_backward, nil, nil)
}
func (h *h10) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
var i uint = ix_start
var j uint = ix_start
if ix_start+63 <= ix_end {
i = ix_end - 63
}
if ix_start+512 <= i {
for ; j < i; j += 8 {
h.Store(data, mask, j)
}
}
for ; i < ix_end; i++ {
h.Store(data, mask, i)
}
}
func (h *h10) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= h.HashTypeLength()-1 && position >= 128 {
var i_start uint = position - 128 + 1
var i_end uint = brotli_min_size_t(position, i_start+num_bytes)
/* Store the last `128 - 1` positions in the hasher.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
var i uint
for i = i_start; i < i_end; i++ {
/* Maximum distance is window size - 16, see section 9.1. of the spec.
Furthermore, we have to make sure that we don't look further back
from the start of the next block than the window size, otherwise we
could access already overwritten areas of the ring-buffer. */
var max_backward uint = h.window_mask_ - brotli_max_size_t(windowGap-1, position-i)
/* We know that i + 128 <= position + num_bytes, i.e. the
end of the current block, and that we have at least a
128-byte tail in the ring-buffer. */
storeAndFindMatchesH10(h, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil)
}
}
}
/* MAX_NUM_MATCHES == 64 + MAX_TREE_SEARCH_DEPTH */
const maxNumMatchesH10 = 128
func (*h10) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
panic("unimplemented")
}
func (*h10) PrepareDistanceCache(distance_cache []int) {
panic("unimplemented")
}

214
vendor/github.com/andybalholm/brotli/h5.go generated vendored Normal file

@@ -0,0 +1,214 @@
package brotli
import "encoding/binary"
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (bucket_size_) to a ring buffer of
fixed size (block_size_). The ring buffer contains the last block_size_
index positions of the given hash key in the compressed data. */
func (*h5) HashTypeLength() uint {
return 4
}
func (*h5) StoreLookahead() uint {
return 4
}
/* HashBytes is the function that chooses the bucket to place the address in. */
func hashBytesH5(data []byte, shift int) uint32 {
var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return uint32(h >> uint(shift))
}
type h5 struct {
hasherCommon
bucket_size_ uint
block_size_ uint
hash_shift_ int
block_mask_ uint32
num []uint16
buckets []uint32
}
func (h *h5) Initialize(params *encoderParams) {
h.hash_shift_ = 32 - h.params.bucket_bits
h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
h.block_size_ = uint(1) << uint(h.params.block_bits)
h.block_mask_ = uint32(h.block_size_ - 1)
h.num = make([]uint16, h.bucket_size_)
h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
}
func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) {
var num []uint16 = h.num
var partial_prepare_threshold uint = h.bucket_size_ >> 6
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var key uint32 = hashBytesH5(data[i:], h.hash_shift_)
num[key] = 0
}
} else {
for i := 0; i < int(h.bucket_size_); i++ {
num[i] = 0
}
}
}
/* Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position. */
func (h *h5) Store(data []byte, mask uint, ix uint) {
var num []uint16 = h.num
var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_)
var minor_ix uint = uint(num[key]) & uint(h.block_mask_)
var offset uint = minor_ix + uint(key<<uint(h.params.block_bits))
h.buckets[offset] = uint32(ix)
num[key]++
}
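To make the indexing concrete with hypothetical parameters bucket_bits = 15 and block_bits = 6: num holds 32768 fill counters, buckets holds 32768 * 64 positions, and a new position whose hash is key lands in slot (num[key] & 63) + key*64, overwriting the oldest entry of that bucket once the counter wraps. A sketch of the slot arithmetic used by Store:

// exampleH5Slot mirrors Store's bucket indexing for a hypothetical
// block_bits of 6: slot = (count mod 64) + key*64.
func exampleH5Slot(key uint32, count uint16) uint {
	const blockBits = 6
	minorIx := uint(count) & ((1 << blockBits) - 1)
	return minorIx + uint(key<<blockBits)
}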
func (h *h5) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
var i uint
for i = ix_start; i < ix_end; i++ {
h.Store(data, mask, i)
}
}
func (h *h5) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
/* Prepare the hashes for three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
h.Store(ringbuffer, ringbuffer_mask, position-3)
h.Store(ringbuffer, ringbuffer_mask, position-2)
h.Store(ringbuffer, ringbuffer_mask, position-1)
}
}
func (h *h5) PrepareDistanceCache(distance_cache []int) {
prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
/* Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.
REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache
values; if this method is invoked repeatedly with the same distance
cache values, it is enough to invoke PrepareDistanceCacheH5 once.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var num []uint16 = h.num
var buckets []uint32 = h.buckets
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var min_score uint = out.score
var best_score uint = out.score
var best_len uint = out.len
var i uint
var bucket []uint32
/* Don't accept a short copy from far away. */
out.len = 0
out.len_code_delta = 0
/* Try last distance first. */
for i = 0; i < uint(h.params.num_last_distances_to_check); i++ {
var backward uint = uint(distance_cache[i])
var prev_ix uint = uint(cur_ix - backward)
if prev_ix >= cur_ix {
continue
}
if backward > max_backward {
continue
}
prev_ix &= ring_buffer_mask
if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 3 || (len == 2 && i < 2) {
/* Comparing for >= 2 does not change the semantics, but just saves for
a few unnecessary binary logarithms in backward reference score,
since we are not interested in such short matches. */
var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
if best_score < score {
if i != 0 {
score -= backwardReferencePenaltyUsingLastDistance(i)
}
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
}
{
var key uint32 = hashBytesH5(data[cur_ix_masked:], h.hash_shift_)
bucket = buckets[key<<uint(h.params.block_bits):]
var down uint
if uint(num[key]) > h.block_size_ {
down = uint(num[key]) - h.block_size_
} else {
down = 0
}
for i = uint(num[key]); i > down; {
var prev_ix uint
i--
prev_ix = uint(bucket[uint32(i)&h.block_mask_])
var backward uint = cur_ix - prev_ix
if backward > max_backward {
break
}
prev_ix &= ring_buffer_mask
if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 4 {
/* Comparing for >= 3 does not change the semantics, but just saves
for a few unnecessary binary logarithms in backward reference
score, since we are not interested in such short matches. */
var score uint = backwardReferenceScore(uint(len), backward)
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix)
num[key]++
}
if min_score == out.score {
searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
}
}

216
vendor/github.com/andybalholm/brotli/h6.go generated vendored Normal file

@@ -0,0 +1,216 @@
package brotli
import "encoding/binary"
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (bucket_size_) to a ring buffer of
fixed size (block_size_). The ring buffer contains the last block_size_
index positions of the given hash key in the compressed data. */
func (*h6) HashTypeLength() uint {
return 8
}
func (*h6) StoreLookahead() uint {
return 8
}
/* HashBytes is the function that chooses the bucket to place the address in. */
func hashBytesH6(data []byte, mask uint64, shift int) uint32 {
var h uint64 = (binary.LittleEndian.Uint64(data) & mask) * kHashMul64Long
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return uint32(h >> uint(shift))
}
type h6 struct {
hasherCommon
bucket_size_ uint
block_size_ uint
hash_shift_ int
hash_mask_ uint64
block_mask_ uint32
num []uint16
buckets []uint32
}
func (h *h6) Initialize(params *encoderParams) {
h.hash_shift_ = 64 - h.params.bucket_bits
h.hash_mask_ = (^(uint64(0))) >> uint(64-8*h.params.hash_len)
h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
h.block_size_ = uint(1) << uint(h.params.block_bits)
h.block_mask_ = uint32(h.block_size_ - 1)
h.num = make([]uint16, h.bucket_size_)
h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
}
func (h *h6) Prepare(one_shot bool, input_size uint, data []byte) {
var num []uint16 = h.num
var partial_prepare_threshold uint = h.bucket_size_ >> 6
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var key uint32 = hashBytesH6(data[i:], h.hash_mask_, h.hash_shift_)
num[key] = 0
}
} else {
for i := 0; i < int(h.bucket_size_); i++ {
num[i] = 0
}
}
}
/* Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position. */
func (h *h6) Store(data []byte, mask uint, ix uint) {
var num []uint16 = h.num
var key uint32 = hashBytesH6(data[ix&mask:], h.hash_mask_, h.hash_shift_)
var minor_ix uint = uint(num[key]) & uint(h.block_mask_)
var offset uint = minor_ix + uint(key<<uint(h.params.block_bits))
h.buckets[offset] = uint32(ix)
num[key]++
}
func (h *h6) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
var i uint
for i = ix_start; i < ix_end; i++ {
h.Store(data, mask, i)
}
}
func (h *h6) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
/* Prepare the hashes for three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
h.Store(ringbuffer, ringbuffer_mask, position-3)
h.Store(ringbuffer, ringbuffer_mask, position-2)
h.Store(ringbuffer, ringbuffer_mask, position-1)
}
}
func (h *h6) PrepareDistanceCache(distance_cache []int) {
prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
/* Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.
REQUIRES: PrepareDistanceCacheH6 must be invoked for current distance cache
values; if this method is invoked repeatedly with the same distance
cache values, it is enough to invoke PrepareDistanceCacheH6 once.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
func (h *h6) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var num []uint16 = h.num
var buckets []uint32 = h.buckets
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var min_score uint = out.score
var best_score uint = out.score
var best_len uint = out.len
var i uint
var bucket []uint32
/* Don't accept a short copy from far away. */
out.len = 0
out.len_code_delta = 0
/* Try last distance first. */
for i = 0; i < uint(h.params.num_last_distances_to_check); i++ {
var backward uint = uint(distance_cache[i])
var prev_ix uint = uint(cur_ix - backward)
if prev_ix >= cur_ix {
continue
}
if backward > max_backward {
continue
}
prev_ix &= ring_buffer_mask
if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 3 || (len == 2 && i < 2) {
/* Comparing for >= 2 does not change the semantics, but just saves for
a few unnecessary binary logarithms in backward reference score,
since we are not interested in such short matches. */
var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
if best_score < score {
if i != 0 {
score -= backwardReferencePenaltyUsingLastDistance(i)
}
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
}
{
var key uint32 = hashBytesH6(data[cur_ix_masked:], h.hash_mask_, h.hash_shift_)
bucket = buckets[key<<uint(h.params.block_bits):]
var down uint
if uint(num[key]) > h.block_size_ {
down = uint(num[key]) - h.block_size_
} else {
down = 0
}
for i = uint(num[key]); i > down; {
var prev_ix uint
i--
prev_ix = uint(bucket[uint32(i)&h.block_mask_])
var backward uint = cur_ix - prev_ix
if backward > max_backward {
break
}
prev_ix &= ring_buffer_mask
if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 4 {
/* Comparing for >= 3 does not change the semantics, but just saves
for a few unnecessary binary logarithms in backward reference
score, since we are not interested in such short matches. */
var score uint = backwardReferenceScore(uint(len), backward)
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix)
num[key]++
}
if min_score == out.score {
searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
}
}

342
vendor/github.com/andybalholm/brotli/hash.go generated vendored Normal file

@@ -0,0 +1,342 @@
package brotli
import (
"encoding/binary"
"fmt"
)
type hasherCommon struct {
params hasherParams
is_prepared_ bool
dict_num_lookups uint
dict_num_matches uint
}
func (h *hasherCommon) Common() *hasherCommon {
return h
}
type hasherHandle interface {
Common() *hasherCommon
Initialize(params *encoderParams)
Prepare(one_shot bool, input_size uint, data []byte)
StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint)
HashTypeLength() uint
StoreLookahead() uint
PrepareDistanceCache(distance_cache []int)
FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult)
StoreRange(data []byte, mask uint, ix_start uint, ix_end uint)
Store(data []byte, mask uint, ix uint)
}
const kCutoffTransformsCount uint32 = 10
/* 0, 12, 27, 23, 42, 63, 56, 48, 59, 64 */
/* 0+0, 4+8, 8+19, 12+11, 16+26, 20+43, 24+32, 28+20, 32+27, 36+28 */
const kCutoffTransforms uint64 = 0x071B520ADA2D3200
type hasherSearchResult struct {
len uint
distance uint
score uint
len_code_delta int
}
/* kHashMul32 multiplier has these properties:
* The multiplier must be odd. Otherwise we may lose the highest bit.
* No long streaks of ones or zeros.
* There is no effort to ensure that it is a prime, the oddity is enough
for this use.
* The number has been tuned heuristically against compression benchmarks. */
const kHashMul32 uint32 = 0x1E35A7BD
const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD
const kHashMul64Long uint64 = 0x1FE35A7BD3579BD3
func hash14(data []byte) uint32 {
var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return h >> (32 - 14)
}
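Every hasher in this package uses the same multiply-and-shift construction: multiplying the little-endian word by the odd constant mixes the input into the high bits of the product, and the right shift keeps just enough of them for the table index (here 32 - 14, leaving a 14-bit bucket). An in-package sketch (function name hypothetical):

// exampleHash14 hashes the 4 bytes "abcd"; the result always fits in 14 bits.
func exampleHash14() uint32 {
	return hash14([]byte("abcd"))
}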
func prepareDistanceCache(distance_cache []int, num_distances int) {
if num_distances > 4 {
var last_distance int = distance_cache[0]
distance_cache[4] = last_distance - 1
distance_cache[5] = last_distance + 1
distance_cache[6] = last_distance - 2
distance_cache[7] = last_distance + 2
distance_cache[8] = last_distance - 3
distance_cache[9] = last_distance + 3
if num_distances > 10 {
var next_last_distance int = distance_cache[1]
distance_cache[10] = next_last_distance - 1
distance_cache[11] = next_last_distance + 1
distance_cache[12] = next_last_distance - 2
distance_cache[13] = next_last_distance + 2
distance_cache[14] = next_last_distance - 3
distance_cache[15] = next_last_distance + 3
}
}
}
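For example, starting from distance_cache = [11, 4, 25, 40, ...] with 16 distances to check, slots 4..15 become 10, 12, 9, 13, 8, 14 (around the last distance 11) and 3, 5, 2, 6, 1, 7 (around the second-to-last distance 4); these are the near-miss candidates that FindLongestMatch tries first. In-package sketch (function name hypothetical):

// examplePrepareDistanceCache returns
// [11 4 25 40 10 12 9 13 8 14 3 5 2 6 1 7].
func examplePrepareDistanceCache() []int {
	cache := make([]int, 16)
	copy(cache, []int{11, 4, 25, 40})
	prepareDistanceCache(cache, 16)
	return cache
}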
const literalByteScore = 135
const distanceBitPenalty = 30
/* Score must be positive after applying maximal penalty. */
const scoreBase = (distanceBitPenalty * 8 * 8)
/* Usually, we always choose the longest backward reference. This function
allows for the exception of that rule.
If we choose a backward reference that is further away, it will
usually be coded with more bits. We approximate this by assuming
log2(distance). If the distance can be expressed in terms of the
last four distances, we use some heuristic constants to estimate
the bits cost. For the first up to four literals we use the bit
cost of the literals from the literal cost model, after that we
use the average bit cost of the cost model.
This function is used to sometimes discard a longer backward reference
when it is not much longer and the bit cost for encoding it is more
than the saved literals.
backward_reference_offset MUST be positive. */
func backwardReferenceScore(copy_length uint, backward_reference_offset uint) uint {
return scoreBase + literalByteScore*uint(copy_length) - distanceBitPenalty*uint(log2FloorNonZero(backward_reference_offset))
}
func backwardReferenceScoreUsingLastDistance(copy_length uint) uint {
return literalByteScore*uint(copy_length) + scoreBase + 15
}
func backwardReferencePenaltyUsingLastDistance(distance_short_code uint) uint {
return uint(39) + ((0x1CA10 >> (distance_short_code & 0xE)) & 0xE)
}
func testStaticDictionaryItem(dictionary *encoderDictionary, item uint, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult) bool {
var len uint
var word_idx uint
var offset uint
var matchlen uint
var backward uint
var score uint
len = item & 0x1F
word_idx = item >> 5
offset = uint(dictionary.words.offsets_by_length[len]) + len*word_idx
if len > max_length {
return false
}
matchlen = findMatchLengthWithLimit(data, dictionary.words.data[offset:], uint(len))
if matchlen+uint(dictionary.cutoffTransformsCount) <= len || matchlen == 0 {
return false
}
{
var cut uint = len - matchlen
var transform_id uint = (cut << 2) + uint((dictionary.cutoffTransforms>>(cut*6))&0x3F)
backward = max_backward + 1 + word_idx + (transform_id << dictionary.words.size_bits_by_length[len])
}
if backward > max_distance {
return false
}
score = backwardReferenceScore(matchlen, backward)
if score < out.score {
return false
}
out.len = matchlen
out.len_code_delta = int(len) - int(matchlen)
out.distance = backward
out.score = score
return true
}
func searchInStaticDictionary(dictionary *encoderDictionary, handle hasherHandle, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult, shallow bool) {
var key uint
var i uint
var self *hasherCommon = handle.Common()
if self.dict_num_matches < self.dict_num_lookups>>7 {
return
}
key = uint(hash14(data) << 1)
for i = 0; ; (func() { i++; key++ })() {
var tmp uint
if shallow {
tmp = 1
} else {
tmp = 2
}
if i >= tmp {
break
}
var item uint = uint(dictionary.hash_table[key])
self.dict_num_lookups++
if item != 0 {
var item_matches bool = testStaticDictionaryItem(dictionary, item, data, max_length, max_backward, max_distance, out)
if item_matches {
self.dict_num_matches++
}
}
}
}
type backwardMatch struct {
distance uint32
length_and_code uint32
}
func initBackwardMatch(self *backwardMatch, dist uint, len uint) {
self.distance = uint32(dist)
self.length_and_code = uint32(len << 5)
}
func initDictionaryBackwardMatch(self *backwardMatch, dist uint, len uint, len_code uint) {
self.distance = uint32(dist)
var tmp uint
if len == len_code {
tmp = 0
} else {
tmp = len_code
}
self.length_and_code = uint32(len<<5 | tmp)
}
func backwardMatchLength(self *backwardMatch) uint {
return uint(self.length_and_code >> 5)
}
func backwardMatchLengthCode(self *backwardMatch) uint {
var code uint = uint(self.length_and_code) & 31
if code != 0 {
return code
} else {
return backwardMatchLength(self)
}
}
func hasherReset(handle hasherHandle) {
if handle == nil {
return
}
handle.Common().is_prepared_ = false
}
func newHasher(typ int) hasherHandle {
switch typ {
case 2:
return &hashLongestMatchQuickly{
bucketBits: 16,
bucketSweep: 1,
hashLen: 5,
useDictionary: true,
}
case 3:
return &hashLongestMatchQuickly{
bucketBits: 16,
bucketSweep: 2,
hashLen: 5,
useDictionary: false,
}
case 4:
return &hashLongestMatchQuickly{
bucketBits: 17,
bucketSweep: 4,
hashLen: 5,
useDictionary: true,
}
case 5:
return new(h5)
case 6:
return new(h6)
case 10:
return new(h10)
case 35:
return &hashComposite{
ha: newHasher(3),
hb: &hashRolling{jump: 4},
}
case 40:
return &hashForgetfulChain{
bucketBits: 15,
numBanks: 1,
bankBits: 16,
numLastDistancesToCheck: 4,
}
case 41:
return &hashForgetfulChain{
bucketBits: 15,
numBanks: 1,
bankBits: 16,
numLastDistancesToCheck: 10,
}
case 42:
return &hashForgetfulChain{
bucketBits: 15,
numBanks: 512,
bankBits: 9,
numLastDistancesToCheck: 16,
}
case 54:
return &hashLongestMatchQuickly{
bucketBits: 20,
bucketSweep: 4,
hashLen: 7,
useDictionary: false,
}
case 55:
return &hashComposite{
ha: newHasher(54),
hb: &hashRolling{jump: 4},
}
case 65:
return &hashComposite{
ha: newHasher(6),
hb: &hashRolling{jump: 1},
}
}
panic(fmt.Sprintf("unknown hasher type: %d", typ))
}
func hasherSetup(handle *hasherHandle, params *encoderParams, data []byte, position uint, input_size uint, is_last bool) {
var self hasherHandle = nil
var common *hasherCommon = nil
var one_shot bool = (position == 0 && is_last)
if *handle == nil {
chooseHasher(params, &params.hasher)
self = newHasher(params.hasher.type_)
*handle = self
common = self.Common()
common.params = params.hasher
self.Initialize(params)
}
self = *handle
common = self.Common()
if !common.is_prepared_ {
self.Prepare(one_shot, input_size, data)
if position == 0 {
common.dict_num_lookups = 0
common.dict_num_matches = 0
}
common.is_prepared_ = true
}
}
func initOrStitchToPreviousBlock(handle *hasherHandle, data []byte, mask uint, params *encoderParams, position uint, input_size uint, is_last bool) {
var self hasherHandle
hasherSetup(handle, params, data, position, input_size, is_last)
self = *handle
self.StitchToPreviousBlock(input_size, position, data, mask)
}

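For orientation, the scoring heuristic in hash.go can be checked by hand: with literalByteScore = 135, distanceBitPenalty = 30 and scoreBase = 1920, a 6-byte match at distance 100 scores 1920 + 810 - 180 = 2550, while an 8-byte match at distance 65536 scores 1920 + 1080 - 480 = 2520, so the shorter but much closer match wins. A minimal standalone sketch of that formula (the constants are copied from hash.go above; the sample lengths and distances are made up):

package main

import (
	"fmt"
	"math/bits"
)

// Constants mirrored from hash.go, for illustration only.
const (
	literalByteScore   = 135
	distanceBitPenalty = 30
	scoreBase          = distanceBitPenalty * 8 * 8
)

// score restates backwardReferenceScore: longer copies gain points,
// farther distances lose roughly log2(distance) penalty units.
func score(copyLength, backwardOffset uint) uint {
	return scoreBase + literalByteScore*copyLength -
		distanceBitPenalty*uint(bits.Len(backwardOffset)-1)
}

func main() {
	fmt.Println(score(6, 100))   // 2550
	fmt.Println(score(8, 65536)) // 2520: longer, but too far away to win
}
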
93
vendor/github.com/andybalholm/brotli/hash_composite.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
package brotli
/* Copyright 2018 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func (h *hashComposite) HashTypeLength() uint {
var a uint = h.ha.HashTypeLength()
var b uint = h.hb.HashTypeLength()
if a > b {
return a
} else {
return b
}
}
func (h *hashComposite) StoreLookahead() uint {
var a uint = h.ha.StoreLookahead()
var b uint = h.hb.StoreLookahead()
if a > b {
return a
} else {
return b
}
}
/* Composite hasher: This hasher allows combining two other hashers, HASHER_A
and HASHER_B. */
type hashComposite struct {
hasherCommon
ha hasherHandle
hb hasherHandle
params *encoderParams
}
func (h *hashComposite) Initialize(params *encoderParams) {
h.params = params
}
/* TODO: Initialization of the hashers is deferred to Prepare (and params
remembered here) because we don't get the one_shot and input_size params
here that are needed to know their memory size. Instead provide
those params to all hashers' Initialize. */
func (h *hashComposite) Prepare(one_shot bool, input_size uint, data []byte) {
if h.ha == nil {
var common_a *hasherCommon
var common_b *hasherCommon
common_a = h.ha.Common()
common_a.params = h.params.hasher
common_a.is_prepared_ = false
common_a.dict_num_lookups = 0
common_a.dict_num_matches = 0
h.ha.Initialize(h.params)
common_b = h.hb.Common()
common_b.params = h.params.hasher
common_b.is_prepared_ = false
common_b.dict_num_lookups = 0
common_b.dict_num_matches = 0
h.hb.Initialize(h.params)
}
h.ha.Prepare(one_shot, input_size, data)
h.hb.Prepare(one_shot, input_size, data)
}
func (h *hashComposite) Store(data []byte, mask uint, ix uint) {
h.ha.Store(data, mask, ix)
h.hb.Store(data, mask, ix)
}
func (h *hashComposite) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
h.ha.StoreRange(data, mask, ix_start, ix_end)
h.hb.StoreRange(data, mask, ix_start, ix_end)
}
func (h *hashComposite) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
}
func (h *hashComposite) PrepareDistanceCache(distance_cache []int) {
h.ha.PrepareDistanceCache(distance_cache)
h.hb.PrepareDistanceCache(distance_cache)
}
func (h *hashComposite) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
h.ha.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out)
h.hb.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out)
}

252
vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go generated vendored Normal file
View File

@ -0,0 +1,252 @@
package brotli
import "encoding/binary"
/* Copyright 2016 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func (*hashForgetfulChain) HashTypeLength() uint {
return 4
}
func (*hashForgetfulChain) StoreLookahead() uint {
return 4
}
/* HashBytes is the function that chooses the bucket to place the address in.*/
func (h *hashForgetfulChain) HashBytes(data []byte) uint {
var hash uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return uint(hash >> (32 - h.bucketBits))
}
type slot struct {
delta uint16
next uint16
}
/* A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
Hashes are stored in chains which are bucketed to groups. Group of chains
share a storage "bank". When more than "bank size" chain nodes are added,
oldest nodes are replaced; this way several chains may share a tail. */
type hashForgetfulChain struct {
hasherCommon
bucketBits uint
numBanks uint
bankBits uint
numLastDistancesToCheck int
addr []uint32
head []uint16
tiny_hash [65536]byte
banks [][]slot
free_slot_idx []uint16
max_hops uint
}
func (h *hashForgetfulChain) Initialize(params *encoderParams) {
var q uint
if params.quality > 6 {
q = 7
} else {
q = 8
}
h.max_hops = q << uint(params.quality-4)
bankSize := 1 << h.bankBits
bucketSize := 1 << h.bucketBits
h.addr = make([]uint32, bucketSize)
h.head = make([]uint16, bucketSize)
h.banks = make([][]slot, h.numBanks)
for i := range h.banks {
h.banks[i] = make([]slot, bankSize)
}
h.free_slot_idx = make([]uint16, h.numBanks)
}
func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte) {
var partial_prepare_threshold uint = (1 << h.bucketBits) >> 6
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var bucket uint = h.HashBytes(data[i:])
/* See InitEmpty comment. */
h.addr[bucket] = 0xCCCCCCCC
h.head[bucket] = 0xCCCC
}
} else {
/* Fill |addr| array with the 0xCCCCCCCC value. Because of wrapping, the position
processed by the hasher never reaches 3GB + 64M; this makes all new chains
terminate after the first node. */
for i := range h.addr {
h.addr[i] = 0xCCCCCCCC
}
for i := range h.head {
h.head[i] = 0
}
}
h.tiny_hash = [65536]byte{}
for i := range h.free_slot_idx {
h.free_slot_idx[i] = 0
}
}
/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
node to corresponding chain; also update tiny_hash for current position. */
func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) {
var key uint = h.HashBytes(data[ix&mask:])
var bank uint = key & (h.numBanks - 1)
idx := uint(h.free_slot_idx[bank]) & ((1 << h.bankBits) - 1)
h.free_slot_idx[bank]++
var delta uint = ix - uint(h.addr[key])
h.tiny_hash[uint16(ix)] = byte(key)
if delta > 0xFFFF {
delta = 0xFFFF
}
h.banks[bank][idx].delta = uint16(delta)
h.banks[bank][idx].next = h.head[key]
h.addr[key] = uint32(ix)
h.head[key] = uint16(idx)
}
func (h *hashForgetfulChain) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
var i uint
for i = ix_start; i < ix_end; i++ {
h.Store(data, mask, i)
}
}
func (h *hashForgetfulChain) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
/* Prepare the hashes for three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
h.Store(ringbuffer, ring_buffer_mask, position-3)
h.Store(ringbuffer, ring_buffer_mask, position-2)
h.Store(ringbuffer, ring_buffer_mask, position-1)
}
}
func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) {
prepareDistanceCache(distance_cache, h.numLastDistancesToCheck)
}
/* Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.
REQUIRES: PrepareDistanceCache must be invoked for current distance cache
values; if this method is invoked repeatedly with the same distance
cache values, it is enough to invoke PrepareDistanceCache once.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
func (h *hashForgetfulChain) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var min_score uint = out.score
var best_score uint = out.score
var best_len uint = out.len
var key uint = h.HashBytes(data[cur_ix_masked:])
var tiny_hash byte = byte(key)
/* Don't accept a short copy from far away. */
out.len = 0
out.len_code_delta = 0
/* Try last distance first. */
for i := 0; i < h.numLastDistancesToCheck; i++ {
var backward uint = uint(distance_cache[i])
var prev_ix uint = (cur_ix - backward)
/* For distance code 0 we want to consider 2-byte matches. */
if i > 0 && h.tiny_hash[uint16(prev_ix)] != tiny_hash {
continue
}
if prev_ix >= cur_ix || backward > max_backward {
continue
}
prev_ix &= ring_buffer_mask
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 2 {
var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
if best_score < score {
if i != 0 {
score -= backwardReferencePenaltyUsingLastDistance(uint(i))
}
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
}
{
var bank uint = key & (h.numBanks - 1)
var backward uint = 0
var hops uint = h.max_hops
var delta uint = cur_ix - uint(h.addr[key])
var slot uint = uint(h.head[key])
for {
tmp6 := hops
hops--
if tmp6 == 0 {
break
}
var prev_ix uint
var last uint = slot
backward += delta
if backward > max_backward {
break
}
prev_ix = (cur_ix - backward) & ring_buffer_mask
slot = uint(h.banks[bank][last].next)
delta = uint(h.banks[bank][last].delta)
if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 4 {
/* Comparing for >= 3 does not change the semantics, but just saves
for a few unnecessary binary logarithms in backward reference
score, since we are not interested in such short matches. */
var score uint = backwardReferenceScore(uint(len), backward)
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
h.Store(data, ring_buffer_mask, cur_ix)
}
if out.score == min_score {
searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
}
}

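As the struct comment in hash_forgetful_chain.go notes, chain nodes only store a 16-bit delta to the previous node, so FindLongestMatch reconstructs candidate positions by summing deltas while it walks the bank; a delta that had to be capped at 0xFFFF soon pushes the accumulated distance past max_backward and ends the walk, which is how old entries are forgotten. A small standalone sketch of that walk (bank contents, positions and limits are invented for illustration):

package main

import "fmt"

// slot mirrors the bank entry above: delta is the distance to the previous
// chain node, next is its index within the same bank.
type slot struct {
	delta uint16
	next  uint16
}

func main() {
	// A hypothetical chain: head -> slot 2 -> slot 1 -> slot 0 (capped delta).
	bank := []slot{
		{delta: 0xFFFF, next: 0}, // everything older than this is unreachable
		{delta: 300, next: 0},
		{delta: 120, next: 1},
	}
	curIx := uint(100000)      // current position in the ring buffer
	maxBackward := uint(65536) // hypothetical window limit

	backward := uint(0)
	delta := uint(50) // distance from curIx to the most recent node
	slotIdx := uint(2)
	for hops := 0; hops < 4; hops++ {
		backward += delta
		if backward > maxBackward {
			break // chain effectively terminated by the capped delta
		}
		fmt.Printf("candidate at %d (backward %d)\n", curIx-backward, backward)
		delta = uint(bank[slotIdx].delta)
		slotIdx = uint(bank[slotIdx].next)
	}
}
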
214
vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go generated vendored Normal file
View File

@ -0,0 +1,214 @@
package brotli
import "encoding/binary"
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* For BUCKET_SWEEP == 1, enabling the dictionary lookup makes compression
a little faster (0.5% - 1%) and it compresses 0.15% better on small text
and HTML inputs. */
func (*hashLongestMatchQuickly) HashTypeLength() uint {
return 8
}
func (*hashLongestMatchQuickly) StoreLookahead() uint {
return 8
}
/* HashBytes is the function that chooses the bucket to place
the address in. The HashLongestMatch and hashLongestMatchQuickly
classes have separate, different implementations of hashing. */
func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 {
var hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64)
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return uint32(hash >> (64 - h.bucketBits))
}
/* A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (1 << bucketBits). Starting from the
given index, bucketSweep buckets are used to store values of a key.
type hashLongestMatchQuickly struct {
hasherCommon
bucketBits uint
bucketSweep int
hashLen uint
useDictionary bool
buckets []uint32
}
func (h *hashLongestMatchQuickly) Initialize(params *encoderParams) {
h.buckets = make([]uint32, 1<<h.bucketBits+h.bucketSweep)
}
func (h *hashLongestMatchQuickly) Prepare(one_shot bool, input_size uint, data []byte) {
var partial_prepare_threshold uint = (4 << h.bucketBits) >> 7
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var key uint32 = h.HashBytes(data[i:])
for j := 0; j < h.bucketSweep; j++ {
h.buckets[key+uint32(j)] = 0
}
}
} else {
/* It is not strictly necessary to fill this buffer here, but
not filling will make the results of the compression stochastic
(but correct). This is because random data would cause the
system to find accidentally good backward references here and there. */
for i := range h.buckets {
h.buckets[i] = 0
}
}
}
/* Look at 5 bytes at &data[ix & mask].
Compute a hash from these, and store the value somewhere within
[ix .. ix+3]. */
func (h *hashLongestMatchQuickly) Store(data []byte, mask uint, ix uint) {
var key uint32 = h.HashBytes(data[ix&mask:])
var off uint32 = uint32(ix>>3) % uint32(h.bucketSweep)
/* Wiggle the value with the bucket sweep range. */
h.buckets[key+off] = uint32(ix)
}
func (h *hashLongestMatchQuickly) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
var i uint
for i = ix_start; i < ix_end; i++ {
h.Store(data, mask, i)
}
}
func (h *hashLongestMatchQuickly) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
/* Prepare the hashes for three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
h.Store(ringbuffer, ringbuffer_mask, position-3)
h.Store(ringbuffer, ringbuffer_mask, position-2)
h.Store(ringbuffer, ringbuffer_mask, position-1)
}
}
func (*hashLongestMatchQuickly) PrepareDistanceCache(distance_cache []int) {
}
/* Find a longest backward match of &data[cur_ix & ring_buffer_mask]
up to the length of max_length and stores the position cur_ix in the
hash table.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
func (h *hashLongestMatchQuickly) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var best_len_in uint = out.len
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var key uint32 = h.HashBytes(data[cur_ix_masked:])
var compare_char int = int(data[cur_ix_masked+best_len_in])
var min_score uint = out.score
var best_score uint = out.score
var best_len uint = best_len_in
var cached_backward uint = uint(distance_cache[0])
var prev_ix uint = cur_ix - cached_backward
var bucket []uint32
out.len_code_delta = 0
if prev_ix < cur_ix {
prev_ix &= uint(uint32(ring_buffer_mask))
if compare_char == int(data[prev_ix+best_len]) {
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 4 {
var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
if best_score < score {
best_score = score
best_len = uint(len)
out.len = uint(len)
out.distance = cached_backward
out.score = best_score
compare_char = int(data[cur_ix_masked+best_len])
if h.bucketSweep == 1 {
h.buckets[key] = uint32(cur_ix)
return
}
}
}
}
}
if h.bucketSweep == 1 {
var backward uint
var len uint
/* Only one to look for, don't bother to prepare for a loop. */
prev_ix = uint(h.buckets[key])
h.buckets[key] = uint32(cur_ix)
backward = cur_ix - prev_ix
prev_ix &= uint(uint32(ring_buffer_mask))
if compare_char != int(data[prev_ix+best_len_in]) {
return
}
if backward == 0 || backward > max_backward {
return
}
len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 4 {
var score uint = backwardReferenceScore(uint(len), backward)
if best_score < score {
out.len = uint(len)
out.distance = backward
out.score = score
return
}
}
} else {
bucket = h.buckets[key:]
var i int
prev_ix = uint(bucket[0])
bucket = bucket[1:]
for i = 0; i < h.bucketSweep; (func() { i++; tmp3 := bucket; bucket = bucket[1:]; prev_ix = uint(tmp3[0]) })() {
var backward uint = cur_ix - prev_ix
var len uint
prev_ix &= uint(uint32(ring_buffer_mask))
if compare_char != int(data[prev_ix+best_len]) {
continue
}
if backward == 0 || backward > max_backward {
continue
}
len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 4 {
var score uint = backwardReferenceScore(uint(len), backward)
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = score
compare_char = int(data[cur_ix_masked+best_len])
}
}
}
}
if h.useDictionary && min_score == out.score {
searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, true)
}
h.buckets[key+uint32((cur_ix>>3)%uint(h.bucketSweep))] = uint32(cur_ix)
}

168
vendor/github.com/andybalholm/brotli/hash_rolling.go generated vendored Normal file
View File

@ -0,0 +1,168 @@
package brotli
/* Copyright 2018 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* NOTE: this hasher does not search in the dictionary. It is used as
backup-hasher, the main hasher already searches in it. */
const kRollingHashMul32 uint32 = 69069
const kInvalidPosHashRolling uint32 = 0xffffffff
/* This hasher uses a longer forward length, but returning a higher value here
will hurt compression by the main hasher when combined with a composite
hasher. The hasher tests for forward itself instead. */
func (*hashRolling) HashTypeLength() uint {
return 4
}
func (*hashRolling) StoreLookahead() uint {
return 4
}
/* Computes a code from a single byte. A lookup table of 256 values could be
used, but simply adding 1 works about as well. */
func (*hashRolling) HashByte(b byte) uint32 {
return uint32(b) + 1
}
func (h *hashRolling) HashRollingFunctionInitial(state uint32, add byte, factor uint32) uint32 {
return uint32(factor*state + h.HashByte(add))
}
func (h *hashRolling) HashRollingFunction(state uint32, add byte, rem byte, factor uint32, factor_remove uint32) uint32 {
return uint32(factor*state + h.HashByte(add) - factor_remove*h.HashByte(rem))
}
/* Rolling hash for long distance long string matches. Stores one position
per bucket, bucket key is computed over a long region. */
type hashRolling struct {
hasherCommon
jump int
state uint32
table []uint32
next_ix uint
factor uint32
factor_remove uint32
}
func (h *hashRolling) Initialize(params *encoderParams) {
h.state = 0
h.next_ix = 0
h.factor = kRollingHashMul32
/* Compute the factor of the oldest byte to remove: factor**steps modulo
0xffffffff (the multiplications rely on 32-bit overflow) */
h.factor_remove = 1
for i := 0; i < 32; i += h.jump {
h.factor_remove *= h.factor
}
h.table = make([]uint32, 16777216)
for i := 0; i < 16777216; i++ {
h.table[i] = kInvalidPosHashRolling
}
}
func (h *hashRolling) Prepare(one_shot bool, input_size uint, data []byte) {
/* Too small size, cannot use this hasher. */
if input_size < 32 {
return
}
h.state = 0
for i := 0; i < 32; i += h.jump {
h.state = h.HashRollingFunctionInitial(h.state, data[i], h.factor)
}
}
func (*hashRolling) Store(data []byte, mask uint, ix uint) {
}
func (*hashRolling) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
}
func (h *hashRolling) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
var position_masked uint
/* In this case we must re-initialize the hasher from scratch from the
current position. */
var available uint = num_bytes
if position&uint(h.jump-1) != 0 {
var diff uint = uint(h.jump) - (position & uint(h.jump-1))
if diff > available {
available = 0
} else {
available = available - diff
}
position += diff
}
position_masked = position & ring_buffer_mask
/* wrapping around ringbuffer not handled. */
if available > ring_buffer_mask-position_masked {
available = ring_buffer_mask - position_masked
}
h.Prepare(false, available, ringbuffer[position&ring_buffer_mask:])
h.next_ix = position
}
func (*hashRolling) PrepareDistanceCache(distance_cache []int) {
}
func (h *hashRolling) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var pos uint = h.next_ix
if cur_ix&uint(h.jump-1) != 0 {
return
}
/* Not enough lookahead */
if max_length < 32 {
return
}
for pos = h.next_ix; pos <= cur_ix; pos += uint(h.jump) {
var code uint32 = h.state & ((16777216 * 64) - 1)
var rem byte = data[pos&ring_buffer_mask]
var add byte = data[(pos+32)&ring_buffer_mask]
var found_ix uint = uint(kInvalidPosHashRolling)
h.state = h.HashRollingFunction(h.state, add, rem, h.factor, h.factor_remove)
if code < 16777216 {
found_ix = uint(h.table[code])
h.table[code] = uint32(pos)
if pos == cur_ix && uint32(found_ix) != kInvalidPosHashRolling {
/* The cast to 32-bit makes backward distances up to 4GB work even
if cur_ix is above 4GB, despite using 32-bit values in the table. */
var backward uint = uint(uint32(cur_ix - found_ix))
if backward <= max_backward {
var found_ix_masked uint = found_ix & ring_buffer_mask
var len uint = findMatchLengthWithLimit(data[found_ix_masked:], data[cur_ix_masked:], max_length)
if len >= 4 && len > out.len {
var score uint = backwardReferenceScore(uint(len), backward)
if score > out.score {
out.len = uint(len)
out.distance = backward
out.score = score
out.len_code_delta = 0
}
}
}
}
}
}
h.next_ix = cur_ix + uint(h.jump)
}

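The rolling update in hash_rolling.go relies on the usual rolling-hash identity: once factor_remove holds factor raised to the window length (mod 2^32), HashRollingFunction applied while sliding the window equals recomputing the hash from scratch over the new window. A standalone check of that identity (the multiplier and byte code are copied from above; the window length and sample data are made up):

package main

import "fmt"

const kRollingHashMul32 uint32 = 69069

// hashByte mirrors hashRolling.HashByte: the code of a single byte.
func hashByte(b byte) uint32 { return uint32(b) + 1 }

// fromScratch hashes a whole window directly.
func fromScratch(window []byte) uint32 {
	var state uint32
	for _, b := range window {
		state = kRollingHashMul32*state + hashByte(b)
	}
	return state
}

func main() {
	data := []byte("an illustrative byte stream for the rolling hash")
	const windowLen = 8 // hypothetical; the real hasher covers 32 bytes with a jump

	// factorRemove = kRollingHashMul32**windowLen (mod 2^32), as Initialize computes.
	factorRemove := uint32(1)
	for i := 0; i < windowLen; i++ {
		factorRemove *= kRollingHashMul32
	}

	// Prime the state over the first window, as Prepare does.
	state := fromScratch(data[:windowLen])

	// Slide one byte at a time and compare with a full recomputation.
	for pos := 0; pos+windowLen < len(data); pos++ {
		rem, add := data[pos], data[pos+windowLen]
		state = kRollingHashMul32*state + hashByte(add) - factorRemove*hashByte(rem)
		fmt.Println(state == fromScratch(data[pos+1:pos+1+windowLen])) // always true
	}
}
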
226
vendor/github.com/andybalholm/brotli/histogram.go generated vendored Normal file
View File

@ -0,0 +1,226 @@
package brotli
import "math"
/* The distance symbols effectively used by "Large Window Brotli" (32-bit). */
const numHistogramDistanceSymbols = 544
type histogramLiteral struct {
data_ [numLiteralSymbols]uint32
total_count_ uint
bit_cost_ float64
}
func histogramClearLiteral(self *histogramLiteral) {
self.data_ = [numLiteralSymbols]uint32{}
self.total_count_ = 0
self.bit_cost_ = math.MaxFloat64
}
func clearHistogramsLiteral(array []histogramLiteral, length uint) {
var i uint
for i = 0; i < length; i++ {
histogramClearLiteral(&array[i:][0])
}
}
func histogramAddLiteral(self *histogramLiteral, val uint) {
self.data_[val]++
self.total_count_++
}
func histogramAddVectorLiteral(self *histogramLiteral, p []byte, n uint) {
self.total_count_ += n
n += 1
for {
n--
if n == 0 {
break
}
self.data_[p[0]]++
p = p[1:]
}
}
func histogramAddHistogramLiteral(self *histogramLiteral, v *histogramLiteral) {
var i uint
self.total_count_ += v.total_count_
for i = 0; i < numLiteralSymbols; i++ {
self.data_[i] += v.data_[i]
}
}
func histogramDataSizeLiteral() uint {
return numLiteralSymbols
}
type histogramCommand struct {
data_ [numCommandSymbols]uint32
total_count_ uint
bit_cost_ float64
}
func histogramClearCommand(self *histogramCommand) {
self.data_ = [numCommandSymbols]uint32{}
self.total_count_ = 0
self.bit_cost_ = math.MaxFloat64
}
func clearHistogramsCommand(array []histogramCommand, length uint) {
var i uint
for i = 0; i < length; i++ {
histogramClearCommand(&array[i:][0])
}
}
func histogramAddCommand(self *histogramCommand, val uint) {
self.data_[val]++
self.total_count_++
}
func histogramAddVectorCommand(self *histogramCommand, p []uint16, n uint) {
self.total_count_ += n
n += 1
for {
n--
if n == 0 {
break
}
self.data_[p[0]]++
p = p[1:]
}
}
func histogramAddHistogramCommand(self *histogramCommand, v *histogramCommand) {
var i uint
self.total_count_ += v.total_count_
for i = 0; i < numCommandSymbols; i++ {
self.data_[i] += v.data_[i]
}
}
func histogramDataSizeCommand() uint {
return numCommandSymbols
}
type histogramDistance struct {
data_ [numDistanceSymbols]uint32
total_count_ uint
bit_cost_ float64
}
func histogramClearDistance(self *histogramDistance) {
self.data_ = [numDistanceSymbols]uint32{}
self.total_count_ = 0
self.bit_cost_ = math.MaxFloat64
}
func clearHistogramsDistance(array []histogramDistance, length uint) {
var i uint
for i = 0; i < length; i++ {
histogramClearDistance(&array[i:][0])
}
}
func histogramAddDistance(self *histogramDistance, val uint) {
self.data_[val]++
self.total_count_++
}
func histogramAddVectorDistance(self *histogramDistance, p []uint16, n uint) {
self.total_count_ += n
n += 1
for {
n--
if n == 0 {
break
}
self.data_[p[0]]++
p = p[1:]
}
}
func histogramAddHistogramDistance(self *histogramDistance, v *histogramDistance) {
var i uint
self.total_count_ += v.total_count_
for i = 0; i < numDistanceSymbols; i++ {
self.data_[i] += v.data_[i]
}
}
func histogramDataSizeDistance() uint {
return numDistanceSymbols
}
type blockSplitIterator struct {
split_ *blockSplit
idx_ uint
type_ uint
length_ uint
}
func initBlockSplitIterator(self *blockSplitIterator, split *blockSplit) {
self.split_ = split
self.idx_ = 0
self.type_ = 0
if len(split.lengths) > 0 {
self.length_ = uint(split.lengths[0])
} else {
self.length_ = 0
}
}
func blockSplitIteratorNext(self *blockSplitIterator) {
if self.length_ == 0 {
self.idx_++
self.type_ = uint(self.split_.types[self.idx_])
self.length_ = uint(self.split_.lengths[self.idx_])
}
self.length_--
}
func buildHistogramsWithContext(cmds []command, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) {
var pos uint = start_pos
var literal_it blockSplitIterator
var insert_and_copy_it blockSplitIterator
var dist_it blockSplitIterator
initBlockSplitIterator(&literal_it, literal_split)
initBlockSplitIterator(&insert_and_copy_it, insert_and_copy_split)
initBlockSplitIterator(&dist_it, dist_split)
for i := range cmds {
var cmd *command = &cmds[i]
var j uint
blockSplitIteratorNext(&insert_and_copy_it)
histogramAddCommand(&insert_and_copy_histograms[insert_and_copy_it.type_], uint(cmd.cmd_prefix_))
/* TODO: unwrap iterator blocks. */
for j = uint(cmd.insert_len_); j != 0; j-- {
var context uint
blockSplitIteratorNext(&literal_it)
context = literal_it.type_
if context_modes != nil {
var lut contextLUT = getContextLUT(context_modes[context])
context = (context << literalContextBits) + uint(getContext(prev_byte, prev_byte2, lut))
}
histogramAddLiteral(&literal_histograms[context], uint(ringbuffer[pos&mask]))
prev_byte2 = prev_byte
prev_byte = ringbuffer[pos&mask]
pos++
}
pos += uint(commandCopyLen(cmd))
if commandCopyLen(cmd) != 0 {
prev_byte2 = ringbuffer[(pos-2)&mask]
prev_byte = ringbuffer[(pos-1)&mask]
if cmd.cmd_prefix_ >= 128 {
var context uint
blockSplitIteratorNext(&dist_it)
context = uint(uint32(dist_it.type_<<distanceContextBits) + commandDistanceContext(cmd))
histogramAddDistance(&copy_dist_histograms[context], uint(cmd.dist_prefix_)&0x3FF)
}
}
}
}

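blockSplitIteratorNext above consumes one position per call: length_ counts down inside the current block, and only when it reaches zero does idx_ advance to the next block type and length. A tiny standalone sketch of that stepping rule against an invented split of two blocks:

package main

import "fmt"

// splitIter is a stripped-down copy of blockSplitIterator's stepping rule.
type splitIter struct {
	types   []byte
	lengths []uint32
	idx     uint
	typ     uint
	length  uint
}

func (it *splitIter) next() {
	if it.length == 0 {
		it.idx++
		it.typ = uint(it.types[it.idx])
		it.length = uint(it.lengths[it.idx])
	}
	it.length--
}

func main() {
	// Hypothetical split: 3 positions of block type 0, then 2 of block type 1.
	it := splitIter{
		types:   []byte{0, 1},
		lengths: []uint32{3, 2},
		length:  3, // seeded from lengths[0], as initBlockSplitIterator does
	}
	for i := 0; i < 5; i++ {
		it.next()
		fmt.Printf("position %d -> block type %d\n", i, it.typ)
	}
	// Prints type 0 for the first three positions, then type 1 for the last two.
}
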
192
vendor/github.com/andybalholm/brotli/http.go generated vendored Normal file
View File

@ -0,0 +1,192 @@
package brotli
import (
"compress/gzip"
"io"
"net/http"
"strings"
)
// HTTPCompressor chooses a compression method (brotli, gzip, or none) based on
// the Accept-Encoding header, sets the Content-Encoding header, and returns a
// WriteCloser that implements that compression. The Close method must be called
// before the current HTTP handler returns.
//
// Due to https://github.com/golang/go/issues/31753, the response will not be
// compressed unless you set a Content-Type header before you call
// HTTPCompressor.
func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser {
if w.Header().Get("Content-Type") == "" {
return nopCloser{w}
}
if w.Header().Get("Vary") == "" {
w.Header().Set("Vary", "Accept-Encoding")
}
encoding := negotiateContentEncoding(r, []string{"br", "gzip"})
switch encoding {
case "br":
w.Header().Set("Content-Encoding", "br")
return NewWriter(w)
case "gzip":
w.Header().Set("Content-Encoding", "gzip")
return gzip.NewWriter(w)
}
return nopCloser{w}
}
// negotiateContentEncoding returns the best offered content encoding for the
// request's Accept-Encoding header. If two offers match with equal weight,
// then the offer earlier in the list is preferred. If no offers are
// acceptable, then "" is returned.
func negotiateContentEncoding(r *http.Request, offers []string) string {
bestOffer := "identity"
bestQ := -1.0
specs := parseAccept(r.Header, "Accept-Encoding")
for _, offer := range offers {
for _, spec := range specs {
if spec.Q > bestQ &&
(spec.Value == "*" || spec.Value == offer) {
bestQ = spec.Q
bestOffer = offer
}
}
}
if bestQ == 0 {
bestOffer = ""
}
return bestOffer
}
// acceptSpec describes an Accept* header.
type acceptSpec struct {
Value string
Q float64
}
// parseAccept parses Accept* headers.
func parseAccept(header http.Header, key string) (specs []acceptSpec) {
loop:
for _, s := range header[key] {
for {
var spec acceptSpec
spec.Value, s = expectTokenSlash(s)
if spec.Value == "" {
continue loop
}
spec.Q = 1.0
s = skipSpace(s)
if strings.HasPrefix(s, ";") {
s = skipSpace(s[1:])
if !strings.HasPrefix(s, "q=") {
continue loop
}
spec.Q, s = expectQuality(s[2:])
if spec.Q < 0.0 {
continue loop
}
}
specs = append(specs, spec)
s = skipSpace(s)
if !strings.HasPrefix(s, ",") {
continue loop
}
s = skipSpace(s[1:])
}
}
return
}
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpace == 0 {
break
}
}
return s[i:]
}
func expectTokenSlash(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
b := s[i]
if (octetTypes[b]&isToken == 0) && b != '/' {
break
}
}
return s[:i], s[i:]
}
func expectQuality(s string) (q float64, rest string) {
switch {
case len(s) == 0:
return -1, ""
case s[0] == '0':
q = 0
case s[0] == '1':
q = 1
default:
return -1, ""
}
s = s[1:]
if !strings.HasPrefix(s, ".") {
return q, s
}
s = s[1:]
i := 0
n := 0
d := 1
for ; i < len(s); i++ {
b := s[i]
if b < '0' || b > '9' {
break
}
n = n*10 + int(b) - '0'
d *= 10
}
return q + float64(n)/float64(d), s[i:]
}
// Octet types from RFC 2616.
var octetTypes [256]octetType
type octetType byte
const (
isToken octetType = 1 << iota
isSpace
)
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}

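http.go above is the package's public HTTP entry point. As its doc comment says, the handler must set Content-Type before asking for the compressor and must Close the returned writer before returning; negotiateContentEncoding then prefers br over gzip when both carry equal weight in Accept-Encoding. A usage sketch (handler body, route and listen address are made up):

package main

import (
	"net/http"

	"github.com/andybalholm/brotli"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Content-Type must be set first, otherwise HTTPCompressor falls back
		// to an uncompressed writer (see the doc comment above).
		w.Header().Set("Content-Type", "text/html; charset=utf-8")

		cw := brotli.HTTPCompressor(w, r)
		defer cw.Close() // must happen before the handler returns

		// "Accept-Encoding: br" gets brotli, "gzip" gets gzip,
		// anything else is written through unchanged.
		cw.Write([]byte("<h1>hello</h1>"))
	})
	http.ListenAndServe(":8080", nil)
}
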
653
vendor/github.com/andybalholm/brotli/huffman.go generated vendored Normal file
View File

@ -0,0 +1,653 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Utilities for building Huffman decoding tables. */
const huffmanMaxCodeLength = 15
/* Maximum possible Huffman table size for an alphabet size of (index * 32),
max code length 15 and root table bits 8. */
var kMaxHuffmanTableSize = []uint16{
256,
402,
436,
468,
500,
534,
566,
598,
630,
662,
694,
726,
758,
790,
822,
854,
886,
920,
952,
984,
1016,
1048,
1080,
1112,
1144,
1176,
1208,
1240,
1272,
1304,
1336,
1368,
1400,
1432,
1464,
1496,
1528,
}
/* BROTLI_NUM_BLOCK_LEN_SYMBOLS == 26 */
const huffmanMaxSize26 = 396
/* BROTLI_MAX_BLOCK_TYPE_SYMBOLS == 258 */
const huffmanMaxSize258 = 632
/* BROTLI_MAX_CONTEXT_MAP_SYMBOLS == 272 */
const huffmanMaxSize272 = 646
const huffmanMaxCodeLengthCodeLength = 5
/* Do not create this struct directly - use the ConstructHuffmanCode
* constructor below! */
type huffmanCode struct {
bits byte
value uint16
}
func constructHuffmanCode(bits byte, value uint16) huffmanCode {
var h huffmanCode
h.bits = bits
h.value = value
return h
}
/* Builds Huffman lookup table assuming code lengths are in symbol order. */
/* Builds Huffman lookup table assuming code lengths are in symbol order.
Returns size of resulting table. */
/* Builds a simple Huffman table. The |num_symbols| parameter is to be
interpreted as follows: 0 means 1 symbol, 1 means 2 symbols,
2 means 3 symbols, 3 means 4 symbols with lengths [2, 2, 2, 2],
4 means 4 symbols with lengths [1, 2, 3, 3]. */
/* Contains a collection of Huffman trees with the same alphabet size. */
/* max_symbol is needed due to simple codes since log2(alphabet_size) could be
greater than log2(max_symbol). */
type huffmanTreeGroup struct {
htrees [][]huffmanCode
codes []huffmanCode
alphabet_size uint16
max_symbol uint16
num_htrees uint16
}
const reverseBitsMax = 8
const reverseBitsBase = 0
var kReverseBits = [1 << reverseBitsMax]byte{
0x00,
0x80,
0x40,
0xC0,
0x20,
0xA0,
0x60,
0xE0,
0x10,
0x90,
0x50,
0xD0,
0x30,
0xB0,
0x70,
0xF0,
0x08,
0x88,
0x48,
0xC8,
0x28,
0xA8,
0x68,
0xE8,
0x18,
0x98,
0x58,
0xD8,
0x38,
0xB8,
0x78,
0xF8,
0x04,
0x84,
0x44,
0xC4,
0x24,
0xA4,
0x64,
0xE4,
0x14,
0x94,
0x54,
0xD4,
0x34,
0xB4,
0x74,
0xF4,
0x0C,
0x8C,
0x4C,
0xCC,
0x2C,
0xAC,
0x6C,
0xEC,
0x1C,
0x9C,
0x5C,
0xDC,
0x3C,
0xBC,
0x7C,
0xFC,
0x02,
0x82,
0x42,
0xC2,
0x22,
0xA2,
0x62,
0xE2,
0x12,
0x92,
0x52,
0xD2,
0x32,
0xB2,
0x72,
0xF2,
0x0A,
0x8A,
0x4A,
0xCA,
0x2A,
0xAA,
0x6A,
0xEA,
0x1A,
0x9A,
0x5A,
0xDA,
0x3A,
0xBA,
0x7A,
0xFA,
0x06,
0x86,
0x46,
0xC6,
0x26,
0xA6,
0x66,
0xE6,
0x16,
0x96,
0x56,
0xD6,
0x36,
0xB6,
0x76,
0xF6,
0x0E,
0x8E,
0x4E,
0xCE,
0x2E,
0xAE,
0x6E,
0xEE,
0x1E,
0x9E,
0x5E,
0xDE,
0x3E,
0xBE,
0x7E,
0xFE,
0x01,
0x81,
0x41,
0xC1,
0x21,
0xA1,
0x61,
0xE1,
0x11,
0x91,
0x51,
0xD1,
0x31,
0xB1,
0x71,
0xF1,
0x09,
0x89,
0x49,
0xC9,
0x29,
0xA9,
0x69,
0xE9,
0x19,
0x99,
0x59,
0xD9,
0x39,
0xB9,
0x79,
0xF9,
0x05,
0x85,
0x45,
0xC5,
0x25,
0xA5,
0x65,
0xE5,
0x15,
0x95,
0x55,
0xD5,
0x35,
0xB5,
0x75,
0xF5,
0x0D,
0x8D,
0x4D,
0xCD,
0x2D,
0xAD,
0x6D,
0xED,
0x1D,
0x9D,
0x5D,
0xDD,
0x3D,
0xBD,
0x7D,
0xFD,
0x03,
0x83,
0x43,
0xC3,
0x23,
0xA3,
0x63,
0xE3,
0x13,
0x93,
0x53,
0xD3,
0x33,
0xB3,
0x73,
0xF3,
0x0B,
0x8B,
0x4B,
0xCB,
0x2B,
0xAB,
0x6B,
0xEB,
0x1B,
0x9B,
0x5B,
0xDB,
0x3B,
0xBB,
0x7B,
0xFB,
0x07,
0x87,
0x47,
0xC7,
0x27,
0xA7,
0x67,
0xE7,
0x17,
0x97,
0x57,
0xD7,
0x37,
0xB7,
0x77,
0xF7,
0x0F,
0x8F,
0x4F,
0xCF,
0x2F,
0xAF,
0x6F,
0xEF,
0x1F,
0x9F,
0x5F,
0xDF,
0x3F,
0xBF,
0x7F,
0xFF,
}
const reverseBitsLowest = (uint64(1) << (reverseBitsMax - 1 + reverseBitsBase))
/* Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX),
where reverse(value, len) is the bit-wise reversal of the len least
significant bits of value. */
func reverseBits8(num uint64) uint64 {
return uint64(kReverseBits[num])
}
/* Stores code in table[0], table[step], table[2*step], ..., table[end] */
/* Assumes that end is an integer multiple of step */
func replicateValue(table []huffmanCode, step int, end int, code huffmanCode) {
for {
end -= step
table[end] = code
if end <= 0 {
break
}
}
}
/* Returns the table width of the next 2nd level table. |count| is the histogram
of bit lengths for the remaining symbols, |len| is the code length of the
next processed symbol. */
func nextTableBitSize(count []uint16, len int, root_bits int) int {
var left int = 1 << uint(len-root_bits)
for len < huffmanMaxCodeLength {
left -= int(count[len])
if left <= 0 {
break
}
len++
left <<= 1
}
return len - root_bits
}
func buildCodeLengthsHuffmanTable(table []huffmanCode, code_lengths []byte, count []uint16) {
var code huffmanCode /* current table entry */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* step size to replicate values in current table */ /* size of current table */ /* symbols sorted by code length */
var symbol int
var key uint64
var key_step uint64
var step int
var table_size int
var sorted [codeLengthCodes]int
var offset [huffmanMaxCodeLengthCodeLength + 1]int
var bits int
var bits_count int
/* offsets in sorted table for each length */
assert(huffmanMaxCodeLengthCodeLength <= reverseBitsMax)
/* Generate offsets into sorted symbol table by code length. */
symbol = -1
bits = 1
var i int
for i = 0; i < huffmanMaxCodeLengthCodeLength; i++ {
symbol += int(count[bits])
offset[bits] = symbol
bits++
}
/* Symbols with code length 0 are placed after all other symbols. */
offset[0] = codeLengthCodes - 1
/* Sort symbols by length, by symbol order within each length. */
symbol = codeLengthCodes
for {
var i int
for i = 0; i < 6; i++ {
symbol--
sorted[offset[code_lengths[symbol]]] = symbol
offset[code_lengths[symbol]]--
}
if symbol == 0 {
break
}
}
table_size = 1 << huffmanMaxCodeLengthCodeLength
/* Special case: all symbols but one have 0 code length. */
if offset[0] == 0 {
code = constructHuffmanCode(0, uint16(sorted[0]))
for key = 0; key < uint64(table_size); key++ {
table[key] = code
}
return
}
/* Fill in table. */
key = 0
key_step = reverseBitsLowest
symbol = 0
bits = 1
step = 2
for {
for bits_count = int(count[bits]); bits_count != 0; bits_count-- {
code = constructHuffmanCode(byte(bits), uint16(sorted[symbol]))
symbol++
replicateValue(table[reverseBits8(key):], step, table_size, code)
key += key_step
}
step <<= 1
key_step >>= 1
bits++
if bits > huffmanMaxCodeLengthCodeLength {
break
}
}
}
func buildHuffmanTable(root_table []huffmanCode, root_bits int, symbol_lists symbolList, count []uint16) uint32 {
var code huffmanCode /* current table entry */ /* next available space in table */ /* current code length */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* 2nd level table prefix code */ /* 2nd level table prefix code addend */ /* step size to replicate values in current table */ /* key length of current table */ /* size of current table */ /* sum of root table size and 2nd level table sizes */
var table []huffmanCode
var len int
var symbol int
var key uint64
var key_step uint64
var sub_key uint64
var sub_key_step uint64
var step int
var table_bits int
var table_size int
var total_size int
var max_length int = -1
var bits int
var bits_count int
assert(root_bits <= reverseBitsMax)
assert(huffmanMaxCodeLength-root_bits <= reverseBitsMax)
for symbolListGet(symbol_lists, max_length) == 0xFFFF {
max_length--
}
max_length += huffmanMaxCodeLength + 1
table = root_table
table_bits = root_bits
table_size = 1 << uint(table_bits)
total_size = table_size
/* Fill in the root table. Reduce the table size if possible,
and create the repetitions by memcpy. */
if table_bits > max_length {
table_bits = max_length
table_size = 1 << uint(table_bits)
}
key = 0
key_step = reverseBitsLowest
bits = 1
step = 2
for {
symbol = bits - (huffmanMaxCodeLength + 1)
for bits_count = int(count[bits]); bits_count != 0; bits_count-- {
symbol = int(symbolListGet(symbol_lists, symbol))
code = constructHuffmanCode(byte(bits), uint16(symbol))
replicateValue(table[reverseBits8(key):], step, table_size, code)
key += key_step
}
step <<= 1
key_step >>= 1
bits++
if bits > table_bits {
break
}
}
/* If root_bits != table_bits then replicate to fill the remaining slots. */
for total_size != table_size {
copy(table[table_size:], table[:uint(table_size)])
table_size <<= 1
}
/* Fill in 2nd level tables and add pointers to root table. */
key_step = reverseBitsLowest >> uint(root_bits-1)
sub_key = reverseBitsLowest << 1
sub_key_step = reverseBitsLowest
len = root_bits + 1
step = 2
for ; len <= max_length; len++ {
symbol = len - (huffmanMaxCodeLength + 1)
for ; count[len] != 0; count[len]-- {
if sub_key == reverseBitsLowest<<1 {
table = table[table_size:]
table_bits = nextTableBitSize(count, int(len), root_bits)
table_size = 1 << uint(table_bits)
total_size += table_size
sub_key = reverseBits8(key)
key += key_step
root_table[sub_key] = constructHuffmanCode(byte(table_bits+root_bits), uint16(uint64(uint(-cap(table)+cap(root_table)))-sub_key))
sub_key = 0
}
symbol = int(symbolListGet(symbol_lists, symbol))
code = constructHuffmanCode(byte(len-root_bits), uint16(symbol))
replicateValue(table[reverseBits8(sub_key):], step, table_size, code)
sub_key += sub_key_step
}
step <<= 1
sub_key_step >>= 1
}
return uint32(total_size)
}
func buildSimpleHuffmanTable(table []huffmanCode, root_bits int, val []uint16, num_symbols uint32) uint32 {
var table_size uint32 = 1
var goal_size uint32 = 1 << uint(root_bits)
switch num_symbols {
case 0:
table[0] = constructHuffmanCode(0, val[0])
case 1:
if val[1] > val[0] {
table[0] = constructHuffmanCode(1, val[0])
table[1] = constructHuffmanCode(1, val[1])
} else {
table[0] = constructHuffmanCode(1, val[1])
table[1] = constructHuffmanCode(1, val[0])
}
table_size = 2
case 2:
table[0] = constructHuffmanCode(1, val[0])
table[2] = constructHuffmanCode(1, val[0])
if val[2] > val[1] {
table[1] = constructHuffmanCode(2, val[1])
table[3] = constructHuffmanCode(2, val[2])
} else {
table[1] = constructHuffmanCode(2, val[2])
table[3] = constructHuffmanCode(2, val[1])
}
table_size = 4
case 3:
var i int
var k int
for i = 0; i < 3; i++ {
for k = i + 1; k < 4; k++ {
if val[k] < val[i] {
var t uint16 = val[k]
val[k] = val[i]
val[i] = t
}
}
}
table[0] = constructHuffmanCode(2, val[0])
table[2] = constructHuffmanCode(2, val[1])
table[1] = constructHuffmanCode(2, val[2])
table[3] = constructHuffmanCode(2, val[3])
table_size = 4
case 4:
if val[3] < val[2] {
var t uint16 = val[3]
val[3] = val[2]
val[2] = t
}
table[0] = constructHuffmanCode(1, val[0])
table[1] = constructHuffmanCode(2, val[1])
table[2] = constructHuffmanCode(1, val[0])
table[3] = constructHuffmanCode(3, val[2])
table[4] = constructHuffmanCode(1, val[0])
table[5] = constructHuffmanCode(2, val[1])
table[6] = constructHuffmanCode(1, val[0])
table[7] = constructHuffmanCode(3, val[3])
table_size = 8
}
for table_size != goal_size {
copy(table[table_size:], table[:uint(table_size)])
table_size <<= 1
}
return goal_size
}

182
vendor/github.com/andybalholm/brotli/literal_cost.go generated vendored Normal file
View File

@ -0,0 +1,182 @@
package brotli
func utf8Position(last uint, c uint, clamp uint) uint {
if c < 128 {
return 0 /* Next one is the 'Byte 1' again. */
} else if c >= 192 { /* Next one is the 'Byte 2' of utf-8 encoding. */
return brotli_min_size_t(1, clamp)
} else {
/* Let's decide over the last byte if this ends the sequence. */
if last < 0xE0 {
return 0 /* Completed two or three byte coding. */ /* Next one is the 'Byte 3' of utf-8 encoding. */
} else {
return brotli_min_size_t(2, clamp)
}
}
}
func decideMultiByteStatsLevel(pos uint, len uint, mask uint, data []byte) uint {
var counts = [3]uint{0} /* should be 2, but 1 compresses better. */
var max_utf8 uint = 1
var last_c uint = 0
var i uint
for i = 0; i < len; i++ {
var c uint = uint(data[(pos+i)&mask])
counts[utf8Position(last_c, c, 2)]++
last_c = c
}
if counts[2] < 500 {
max_utf8 = 1
}
if counts[1]+counts[2] < 25 {
max_utf8 = 0
}
return max_utf8
}
func estimateBitCostsForLiteralsUTF8(pos uint, len uint, mask uint, data []byte, cost []float32) {
var max_utf8 uint = decideMultiByteStatsLevel(pos, uint(len), mask, data)
/* Bootstrap histograms. */
var histogram = [3][256]uint{[256]uint{0}}
var window_half uint = 495
var in_window uint = brotli_min_size_t(window_half, uint(len))
var in_window_utf8 = [3]uint{0}
/* max_utf8 is 0 (normal ASCII single byte modeling),
1 (for 2-byte UTF-8 modeling), or 2 (for 3-byte UTF-8 modeling). */
var i uint
{
var last_c uint = 0
var utf8_pos uint = 0
for i = 0; i < in_window; i++ {
var c uint = uint(data[(pos+i)&mask])
histogram[utf8_pos][c]++
in_window_utf8[utf8_pos]++
utf8_pos = utf8Position(last_c, c, max_utf8)
last_c = c
}
}
/* Compute bit costs with sliding window. */
for i = 0; i < len; i++ {
if i >= window_half {
var c uint
var last_c uint
if i < window_half+1 {
c = 0
} else {
c = uint(data[(pos+i-window_half-1)&mask])
}
if i < window_half+2 {
last_c = 0
} else {
last_c = uint(data[(pos+i-window_half-2)&mask])
}
/* Remove a byte in the past. */
var utf8_pos2 uint = utf8Position(last_c, c, max_utf8)
histogram[utf8_pos2][data[(pos+i-window_half)&mask]]--
in_window_utf8[utf8_pos2]--
}
if i+window_half < len {
var c uint = uint(data[(pos+i+window_half-1)&mask])
var last_c uint = uint(data[(pos+i+window_half-2)&mask])
/* Add a byte in the future. */
var utf8_pos2 uint = utf8Position(last_c, c, max_utf8)
histogram[utf8_pos2][data[(pos+i+window_half)&mask]]++
in_window_utf8[utf8_pos2]++
}
{
var c uint
var last_c uint
if i < 1 {
c = 0
} else {
c = uint(data[(pos+i-1)&mask])
}
if i < 2 {
last_c = 0
} else {
last_c = uint(data[(pos+i-2)&mask])
}
var utf8_pos uint = utf8Position(last_c, c, max_utf8)
var masked_pos uint = (pos + i) & mask
var histo uint = histogram[utf8_pos][data[masked_pos]]
var lit_cost float64
if histo == 0 {
histo = 1
}
lit_cost = fastLog2(in_window_utf8[utf8_pos]) - fastLog2(histo)
lit_cost += 0.02905
if lit_cost < 1.0 {
lit_cost *= 0.5
lit_cost += 0.5
}
/* Make the first bytes more expensive -- seems to help, not sure why.
Perhaps because the entropy source is changing its properties
rapidly in the beginning of the file, perhaps because the beginning
of the data is a statistical "anomaly". */
if i < 2000 {
lit_cost += 0.7 - (float64(2000-i) / 2000.0 * 0.35)
}
cost[i] = float32(lit_cost)
}
}
}
func estimateBitCostsForLiterals(pos uint, len uint, mask uint, data []byte, cost []float32) {
if isMostlyUTF8(data, pos, mask, uint(len), kMinUTF8Ratio) {
estimateBitCostsForLiteralsUTF8(pos, uint(len), mask, data, cost)
return
} else {
var histogram = [256]uint{0}
var window_half uint = 2000
var in_window uint = brotli_min_size_t(window_half, uint(len))
var i uint
/* Bootstrap histogram. */
for i = 0; i < in_window; i++ {
histogram[data[(pos+i)&mask]]++
}
/* Compute bit costs with sliding window. */
for i = 0; i < len; i++ {
var histo uint
if i >= window_half {
/* Remove a byte in the past. */
histogram[data[(pos+i-window_half)&mask]]--
in_window--
}
if i+window_half < len {
/* Add a byte in the future. */
histogram[data[(pos+i+window_half)&mask]]++
in_window++
}
histo = histogram[data[(pos+i)&mask]]
if histo == 0 {
histo = 1
}
{
var lit_cost float64 = fastLog2(in_window) - fastLog2(histo)
lit_cost += 0.029
if lit_cost < 1.0 {
lit_cost *= 0.5
lit_cost += 0.5
}
cost[i] = float32(lit_cost)
}
}
}
}

66
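The per-byte cost computed in literal_cost.go is essentially the byte's self-information within the sliding window, log2(in_window / histo), plus a small constant, with values below one bit squashed into the range [0.5, 1). For example, a byte seen 50 times in a 2000-byte window costs roughly log2(2000/50) + 0.029, about 5.35 bits. A standalone sketch of that rule (math.Log2 stands in for fastLog2; the sample counts are made up):

package main

import (
	"fmt"
	"math"
)

// literalBitCost mirrors the per-byte rule from estimateBitCostsForLiterals:
// self-information within the window plus a small constant, with sub-one-bit
// costs squashed into [0.5, 1).
func literalBitCost(inWindow, histo uint) float64 {
	if histo == 0 {
		histo = 1
	}
	cost := math.Log2(float64(inWindow)) - math.Log2(float64(histo))
	cost += 0.029
	if cost < 1.0 {
		cost = cost*0.5 + 0.5
	}
	return cost
}

func main() {
	fmt.Println(literalBitCost(2000, 50))   // ~5.35 bits: a fairly rare byte
	fmt.Println(literalBitCost(2000, 1500)) // raw cost < 1 bit, squashed toward 1
}
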
vendor/github.com/andybalholm/brotli/memory.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
package brotli
/* Copyright 2016 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/*
Dynamically grows array capacity to at least the requested size
T: data type
A: array
C: capacity
R: requested size
*/
func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) {
if *c < r {
var new_size uint = *c
if new_size == 0 {
new_size = r
}
for new_size < r {
new_size *= 2
}
if cap(*a) < int(new_size) {
var new_array []byte = make([]byte, new_size)
if *c != 0 {
copy(new_array, (*a)[:*c])
}
*a = new_array
} else {
*a = (*a)[:new_size]
}
*c = new_size
}
}
func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) {
var new_array []uint32
if *c < r {
var new_size uint = *c
if new_size == 0 {
new_size = r
}
for new_size < r {
new_size *= 2
}
if cap(*a) < int(new_size) {
new_array = make([]uint32, new_size)
if *c != 0 {
copy(new_array, (*a)[:*c])
}
*a = new_array
} else {
*a = (*a)[:new_size]
}
*c = new_size
}
}

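The ensure-capacity helpers in memory.go grow by doubling from the current capacity (or jump straight to the request when the capacity is still zero), and only reallocate when the backing array is genuinely too small. A small standalone trace of just the sizing rule (the sample sizes are arbitrary):

package main

import "fmt"

// nextCapacity reproduces the sizing rule used by the helpers above: start
// from the current capacity (or the request if capacity is zero) and double
// until the request fits.
func nextCapacity(current, requested uint) uint {
	if current >= requested {
		return current
	}
	size := current
	if size == 0 {
		size = requested
	}
	for size < requested {
		size *= 2
	}
	return size
}

func main() {
	fmt.Println(nextCapacity(0, 100))   // 100: the first allocation is exact
	fmt.Println(nextCapacity(100, 101)) // 200: afterwards the capacity doubles
	fmt.Println(nextCapacity(200, 900)) // 1600
}
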
574
vendor/github.com/andybalholm/brotli/metablock.go generated vendored Normal file
View File

@ -0,0 +1,574 @@
package brotli
import (
"sync"
)
/* Copyright 2014 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Algorithms for distributing the literals and commands of a metablock between
block types and contexts. */
type metaBlockSplit struct {
literal_split blockSplit
command_split blockSplit
distance_split blockSplit
literal_context_map []uint32
literal_context_map_size uint
distance_context_map []uint32
distance_context_map_size uint
literal_histograms []histogramLiteral
literal_histograms_size uint
command_histograms []histogramCommand
command_histograms_size uint
distance_histograms []histogramDistance
distance_histograms_size uint
}
var metaBlockPool sync.Pool
func getMetaBlockSplit() *metaBlockSplit {
mb, _ := metaBlockPool.Get().(*metaBlockSplit)
if mb == nil {
mb = &metaBlockSplit{}
} else {
initBlockSplit(&mb.literal_split)
initBlockSplit(&mb.command_split)
initBlockSplit(&mb.distance_split)
mb.literal_context_map = mb.literal_context_map[:0]
mb.literal_context_map_size = 0
mb.distance_context_map = mb.distance_context_map[:0]
mb.distance_context_map_size = 0
mb.literal_histograms = mb.literal_histograms[:0]
mb.command_histograms = mb.command_histograms[:0]
mb.distance_histograms = mb.distance_histograms[:0]
}
return mb
}
func freeMetaBlockSplit(mb *metaBlockSplit) {
metaBlockPool.Put(mb)
}
func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) {
var dist_params *distanceParams = &params.dist
var alphabet_size uint32
var max_distance uint32
dist_params.distance_postfix_bits = npostfix
dist_params.num_direct_distance_codes = ndirect
alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), maxDistanceBits))
max_distance = ndirect + (1 << (maxDistanceBits + npostfix + 2)) - (1 << (npostfix + 2))
if params.large_window {
var bound = [maxNpostfix + 1]uint32{0, 4, 12, 28}
var postfix uint32 = 1 << npostfix
alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), largeMaxDistanceBits))
/* The maximum distance is set so that no distance symbol used can encode
a distance larger than BROTLI_MAX_ALLOWED_DISTANCE with all
its extra bits set. */
if ndirect < bound[npostfix] {
max_distance = maxAllowedDistance - (bound[npostfix] - ndirect)
} else if ndirect >= bound[npostfix]+postfix {
max_distance = (3 << 29) - 4 + (ndirect - bound[npostfix])
} else {
max_distance = maxAllowedDistance
}
}
dist_params.alphabet_size = alphabet_size
dist_params.max_distance = uint(max_distance)
}
func recomputeDistancePrefixes(cmds []command, orig_params *distanceParams, new_params *distanceParams) {
if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes {
return
}
for i := range cmds {
var cmd *command = &cmds[i]
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
prefixEncodeCopyDistance(uint(commandRestoreDistanceCode(cmd, orig_params)), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_)
}
}
}
func computeDistanceCost(cmds []command, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool {
var equal_params bool = false
var dist_prefix uint16
var dist_extra uint32
var extra_bits float64 = 0.0
var histo histogramDistance
histogramClearDistance(&histo)
if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes {
equal_params = true
}
for i := range cmds {
cmd := &cmds[i]
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
if equal_params {
dist_prefix = cmd.dist_prefix_
} else {
var distance uint32 = commandRestoreDistanceCode(cmd, orig_params)
if distance > uint32(new_params.max_distance) {
return false
}
prefixEncodeCopyDistance(uint(distance), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &dist_prefix, &dist_extra)
}
histogramAddDistance(&histo, uint(dist_prefix)&0x3FF)
extra_bits += float64(dist_prefix >> 10)
}
}
*cost = populationCostDistance(&histo) + extra_bits
return true
}
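/* Illustrative helper (not upstream code): cmd.dist_prefix_ packs the distance
   symbol in its low 10 bits and the number of extra bits in the high bits,
   which is why the cost loop above splits it with &0x3FF and >>10. */
func splitDistPrefix(distPrefix uint16) (symbol, extraBitCount uint16) {
	return distPrefix & 0x3FF, distPrefix >> 10
}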
var buildMetaBlock_kMaxNumberOfHistograms uint = 256
func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, literal_context_mode int, mb *metaBlockSplit) {
var distance_histograms []histogramDistance
var literal_histograms []histogramLiteral
var literal_context_modes []int = nil
var literal_histograms_size uint
var distance_histograms_size uint
var i uint
var literal_context_multiplier uint = 1
var npostfix uint32
var ndirect_msb uint32 = 0
var check_orig bool = true
var best_dist_cost float64 = 1e99
var orig_params encoderParams = *params
/* Histogram ids need to fit in one byte. */
var new_params encoderParams = *params
for npostfix = 0; npostfix <= maxNpostfix; npostfix++ {
for ; ndirect_msb < 16; ndirect_msb++ {
var ndirect uint32 = ndirect_msb << npostfix
var skip bool
var dist_cost float64
initDistanceParams(&new_params, npostfix, ndirect)
if npostfix == orig_params.dist.distance_postfix_bits && ndirect == orig_params.dist.num_direct_distance_codes {
check_orig = false
}
skip = !computeDistanceCost(cmds, &orig_params.dist, &new_params.dist, &dist_cost)
if skip || (dist_cost > best_dist_cost) {
break
}
best_dist_cost = dist_cost
params.dist = new_params.dist
}
if ndirect_msb > 0 {
ndirect_msb--
}
ndirect_msb /= 2
}
if check_orig {
var dist_cost float64
computeDistanceCost(cmds, &orig_params.dist, &orig_params.dist, &dist_cost)
if dist_cost < best_dist_cost {
/* NB: currently unused; uncomment when more param tuning is added. */
/* best_dist_cost = dist_cost; */
params.dist = orig_params.dist
}
}
recomputeDistancePrefixes(cmds, &orig_params.dist, &params.dist)
splitBlock(cmds, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split)
if !params.disable_literal_context_modeling {
literal_context_multiplier = 1 << literalContextBits
literal_context_modes = make([]int, (mb.literal_split.num_types))
for i = 0; i < mb.literal_split.num_types; i++ {
literal_context_modes[i] = literal_context_mode
}
}
literal_histograms_size = mb.literal_split.num_types * literal_context_multiplier
literal_histograms = make([]histogramLiteral, literal_histograms_size)
clearHistogramsLiteral(literal_histograms, literal_histograms_size)
distance_histograms_size = mb.distance_split.num_types << distanceContextBits
distance_histograms = make([]histogramDistance, distance_histograms_size)
clearHistogramsDistance(distance_histograms, distance_histograms_size)
mb.command_histograms_size = mb.command_split.num_types
if cap(mb.command_histograms) < int(mb.command_histograms_size) {
mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
} else {
mb.command_histograms = mb.command_histograms[:mb.command_histograms_size]
}
clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size)
buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms)
literal_context_modes = nil
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
} else {
mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
}
mb.literal_histograms_size = mb.literal_context_map_size
if cap(mb.literal_histograms) < int(mb.literal_histograms_size) {
mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
} else {
mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size]
}
clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map)
literal_histograms = nil
if params.disable_literal_context_modeling {
/* Distribute assignment to all contexts. */
for i = mb.literal_split.num_types; i != 0; {
var j uint = 0
i--
for ; j < 1<<literalContextBits; j++ {
mb.literal_context_map[(i<<literalContextBits)+j] = mb.literal_context_map[i]
}
}
}
mb.distance_context_map_size = mb.distance_split.num_types << distanceContextBits
if cap(mb.distance_context_map) < int(mb.distance_context_map_size) {
mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
} else {
mb.distance_context_map = mb.distance_context_map[:mb.distance_context_map_size]
}
mb.distance_histograms_size = mb.distance_context_map_size
if cap(mb.distance_histograms) < int(mb.distance_histograms_size) {
mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
} else {
mb.distance_histograms = mb.distance_histograms[:mb.distance_histograms_size]
}
clusterHistogramsDistance(distance_histograms, mb.distance_context_map_size, buildMetaBlock_kMaxNumberOfHistograms, mb.distance_histograms, &mb.distance_histograms_size, mb.distance_context_map)
distance_histograms = nil
}
const maxStaticContexts = 13
/* Greedy block splitter for one block category (literal, command or distance).
Gathers histograms for all context buckets. */
type contextBlockSplitter struct {
alphabet_size_ uint
num_contexts_ uint
max_block_types_ uint
min_block_size_ uint
split_threshold_ float64
num_blocks_ uint
split_ *blockSplit
histograms_ []histogramLiteral
histograms_size_ *uint
target_block_size_ uint
block_size_ uint
curr_histogram_ix_ uint
last_histogram_ix_ [2]uint
last_entropy_ [2 * maxStaticContexts]float64
merge_last_count_ uint
}
func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, num_contexts uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramLiteral, histograms_size *uint) {
var max_num_blocks uint = num_symbols/min_block_size + 1
var max_num_types uint
assert(num_contexts <= maxStaticContexts)
self.alphabet_size_ = alphabet_size
self.num_contexts_ = num_contexts
self.max_block_types_ = maxNumberOfBlockTypes / num_contexts
self.min_block_size_ = min_block_size
self.split_threshold_ = split_threshold
self.num_blocks_ = 0
self.split_ = split
self.histograms_size_ = histograms_size
self.target_block_size_ = min_block_size
self.block_size_ = 0
self.curr_histogram_ix_ = 0
self.merge_last_count_ = 0
/* We have to allocate one more histogram than the maximum number of block
types for the current histogram when the meta-block is too big. */
max_num_types = brotli_min_size_t(max_num_blocks, self.max_block_types_+1)
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
split.num_blocks = max_num_blocks
*histograms_size = max_num_types * num_contexts
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramLiteral, (*histograms_size))
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */
clearHistogramsLiteral(self.histograms_[0:], num_contexts)
self.last_histogram_ix_[1] = 0
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does one of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
func contextBlockSplitterFinishBlock(self *contextBlockSplitter, is_final bool) {
var split *blockSplit = self.split_
var num_contexts uint = self.num_contexts_
var last_entropy []float64 = self.last_entropy_[:]
var histograms []histogramLiteral = self.histograms_
if self.block_size_ < self.min_block_size_ {
self.block_size_ = self.min_block_size_
}
if self.num_blocks_ == 0 {
var i uint
/* Create first block. */
split.lengths[0] = uint32(self.block_size_)
split.types[0] = 0
for i = 0; i < num_contexts; i++ {
last_entropy[i] = bitsEntropy(histograms[i].data_[:], self.alphabet_size_)
last_entropy[num_contexts+i] = last_entropy[i]
}
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_ += num_contexts
if self.curr_histogram_ix_ < *self.histograms_size_ {
clearHistogramsLiteral(self.histograms_[self.curr_histogram_ix_:], self.num_contexts_)
}
self.block_size_ = 0
} else if self.block_size_ > 0 {
var entropy [maxStaticContexts]float64
var combined_histo []histogramLiteral = make([]histogramLiteral, (2 * num_contexts))
var combined_entropy [2 * maxStaticContexts]float64
var diff = [2]float64{0.0}
/* Try merging the set of histograms for the current block type with the
respective set of histograms for the last and second last block types.
Decide over the split based on the total reduction of entropy across
all contexts. */
var i uint
for i = 0; i < num_contexts; i++ {
var curr_histo_ix uint = self.curr_histogram_ix_ + i
var j uint
entropy[i] = bitsEntropy(histograms[curr_histo_ix].data_[:], self.alphabet_size_)
for j = 0; j < 2; j++ {
var jx uint = j*num_contexts + i
var last_histogram_ix uint = self.last_histogram_ix_[j] + i
combined_histo[jx] = histograms[curr_histo_ix]
histogramAddHistogramLiteral(&combined_histo[jx], &histograms[last_histogram_ix])
combined_entropy[jx] = bitsEntropy(combined_histo[jx].data_[0:], self.alphabet_size_)
diff[j] += combined_entropy[jx] - entropy[i] - last_entropy[jx]
}
}
if split.num_types < self.max_block_types_ && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
/* Create new block. */
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = byte(split.num_types)
self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = split.num_types * num_contexts
for i = 0; i < num_contexts; i++ {
last_entropy[num_contexts+i] = last_entropy[i]
last_entropy[i] = entropy[i]
}
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_ += num_contexts
if self.curr_histogram_ix_ < *self.histograms_size_ {
clearHistogramsLiteral(self.histograms_[self.curr_histogram_ix_:], self.num_contexts_)
}
self.block_size_ = 0
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else if diff[1] < diff[0]-20.0 {
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
/* Combine this block with second last block. */
var tmp uint = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
self.last_histogram_ix_[1] = tmp
for i = 0; i < num_contexts; i++ {
histograms[self.last_histogram_ix_[0]+i] = combined_histo[num_contexts+i]
last_entropy[num_contexts+i] = last_entropy[i]
last_entropy[i] = combined_entropy[num_contexts+i]
histogramClearLiteral(&histograms[self.curr_histogram_ix_+i])
}
self.num_blocks_++
self.block_size_ = 0
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else {
/* Combine this block with last block. */
split.lengths[self.num_blocks_-1] += uint32(self.block_size_)
for i = 0; i < num_contexts; i++ {
histograms[self.last_histogram_ix_[0]+i] = combined_histo[i]
last_entropy[i] = combined_entropy[i]
if split.num_types == 1 {
last_entropy[num_contexts+i] = last_entropy[i]
}
histogramClearLiteral(&histograms[self.curr_histogram_ix_+i])
}
self.block_size_ = 0
self.merge_last_count_++
if self.merge_last_count_ > 1 {
self.target_block_size_ += self.min_block_size_
}
}
combined_histo = nil
}
if is_final {
*self.histograms_size_ = split.num_types * num_contexts
split.num_blocks = self.num_blocks_
}
}
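/* Illustrative sketch (not upstream code) of the decision rule above: diff[0]
   and diff[1] are the entropy penalties of merging the current block into the
   last and second-last block types. A new block type is opened only when both
   merges would cost more than split_threshold_ bits; otherwise the cheaper
   merge wins, with a 20-bit bias towards extending the last block. */
func splitDecision(diff [2]float64, splitThreshold float64, canAddType bool) string {
	switch {
	case canAddType && diff[0] > splitThreshold && diff[1] > splitThreshold:
		return "new block type"
	case diff[1] < diff[0]-20.0:
		return "reuse second-last block type"
	default:
		return "merge into last block"
	}
}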
/* Adds the next symbol to the current block type and context. When the
current block reaches the target size, decides on merging the block. */
func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, context uint) {
histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_+context], symbol)
self.block_size_++
if self.block_size_ == self.target_block_size_ {
contextBlockSplitterFinishBlock(self, false) /* is_final = false */
}
}
func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) {
var i uint
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
} else {
mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
}
for i = 0; i < mb.literal_split.num_types; i++ {
var offset uint32 = uint32(i * num_contexts)
var j uint
for j = 0; j < 1<<literalContextBits; j++ {
mb.literal_context_map[(i<<literalContextBits)+j] = offset + static_context_map[j]
}
}
}
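/* Illustrative sketch (not upstream code): after mapStaticContexts the literal
   context map is indexed by (blockType << literalContextBits) + contextID and
   yields the index of the literal histogram to use for that combination. */
func lookupLiteralHistogram(mb *metaBlockSplit, blockType uint, contextID uint) uint32 {
	return mb.literal_context_map[(blockType<<literalContextBits)+contextID]
}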
func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
var lit_blocks struct {
plain blockSplitterLiteral
ctx contextBlockSplitter
}
var cmd_blocks blockSplitterCommand
var dist_blocks blockSplitterDistance
var num_literals uint = 0
for i := range commands {
num_literals += uint(commands[i].insert_len_)
}
if num_contexts == 1 {
initBlockSplitterLiteral(&lit_blocks.plain, 256, 512, 400.0, num_literals, &mb.literal_split, &mb.literal_histograms, &mb.literal_histograms_size)
} else {
initContextBlockSplitter(&lit_blocks.ctx, 256, num_contexts, 512, 400.0, num_literals, &mb.literal_split, &mb.literal_histograms, &mb.literal_histograms_size)
}
initBlockSplitterCommand(&cmd_blocks, numCommandSymbols, 1024, 500.0, uint(len(commands)), &mb.command_split, &mb.command_histograms, &mb.command_histograms_size)
initBlockSplitterDistance(&dist_blocks, 64, 512, 100.0, uint(len(commands)), &mb.distance_split, &mb.distance_histograms, &mb.distance_histograms_size)
for _, cmd := range commands {
var j uint
blockSplitterAddSymbolCommand(&cmd_blocks, uint(cmd.cmd_prefix_))
for j = uint(cmd.insert_len_); j != 0; j-- {
var literal byte = ringbuffer[pos&mask]
if num_contexts == 1 {
blockSplitterAddSymbolLiteral(&lit_blocks.plain, uint(literal))
} else {
var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
contextBlockSplitterAddSymbol(&lit_blocks.ctx, uint(literal), uint(static_context_map[context]))
}
prev_byte2 = prev_byte
prev_byte = literal
pos++
}
pos += uint(commandCopyLen(&cmd))
if commandCopyLen(&cmd) != 0 {
prev_byte2 = ringbuffer[(pos-2)&mask]
prev_byte = ringbuffer[(pos-1)&mask]
if cmd.cmd_prefix_ >= 128 {
blockSplitterAddSymbolDistance(&dist_blocks, uint(cmd.dist_prefix_)&0x3FF)
}
}
}
if num_contexts == 1 {
blockSplitterFinishBlockLiteral(&lit_blocks.plain, true) /* is_final = true */
} else {
contextBlockSplitterFinishBlock(&lit_blocks.ctx, true) /* is_final = true */
}
blockSplitterFinishBlockCommand(&cmd_blocks, true) /* is_final = true */
blockSplitterFinishBlockDistance(&dist_blocks, true) /* is_final = true */
if num_contexts > 1 {
mapStaticContexts(num_contexts, static_context_map, mb)
}
}
func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
if num_contexts == 1 {
buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, mb)
} else {
buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, mb)
}
}
func optimizeHistograms(num_distance_codes uint32, mb *metaBlockSplit) {
var good_for_rle [numCommandSymbols]byte
var i uint
for i = 0; i < mb.literal_histograms_size; i++ {
optimizeHuffmanCountsForRLE(256, mb.literal_histograms[i].data_[:], good_for_rle[:])
}
for i = 0; i < mb.command_histograms_size; i++ {
optimizeHuffmanCountsForRLE(numCommandSymbols, mb.command_histograms[i].data_[:], good_for_rle[:])
}
for i = 0; i < mb.distance_histograms_size; i++ {
optimizeHuffmanCountsForRLE(uint(num_distance_codes), mb.distance_histograms[i].data_[:], good_for_rle[:])
}
}
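/* Illustrative end-to-end sketch (not part of the upstream library): how the
   pieces in this file fit together for a single meta-block. The context LUT
   and the number of distance codes are assumed to be supplied by the caller;
   the actual encoding step is elided. */
func exampleBuildMetaBlock(ringbuffer []byte, pos uint, mask uint, prevByte byte, prevByte2 byte, literalContextLUT contextLUT, cmds []command, numDistanceCodes uint32) *metaBlockSplit {
	mb := getMetaBlockSplit()
	/* One literal context, no static context map. */
	buildMetaBlockGreedy(ringbuffer, pos, mask, prevByte, prevByte2, literalContextLUT, 1, nil, cmds, mb)
	optimizeHistograms(numDistanceCodes, mb)
	/* The caller would now write the meta-block and finally call
	   freeMetaBlockSplit(mb) to recycle the buffers. */
	return mb
}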


@ -0,0 +1,165 @@
package brotli
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Greedy block splitter for one block category (literal, command or distance).
*/
type blockSplitterCommand struct {
alphabet_size_ uint
min_block_size_ uint
split_threshold_ float64
num_blocks_ uint
split_ *blockSplit
histograms_ []histogramCommand
histograms_size_ *uint
target_block_size_ uint
block_size_ uint
curr_histogram_ix_ uint
last_histogram_ix_ [2]uint
last_entropy_ [2]float64
merge_last_count_ uint
}
func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramCommand, histograms_size *uint) {
var max_num_blocks uint = num_symbols/min_block_size + 1
var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1)
/* We have to allocate one more histogram than the maximum number of block
types for the current histogram when the meta-block is too big. */
self.alphabet_size_ = alphabet_size
self.min_block_size_ = min_block_size
self.split_threshold_ = split_threshold
self.num_blocks_ = 0
self.split_ = split
self.histograms_size_ = histograms_size
self.target_block_size_ = min_block_size
self.block_size_ = 0
self.curr_histogram_ix_ = 0
self.merge_last_count_ = 0
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
*histograms_size = max_num_types
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramCommand, (*histograms_size))
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */
histogramClearCommand(&self.histograms_[0])
self.last_histogram_ix_[1] = 0
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does one of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) {
var split *blockSplit = self.split_
var last_entropy []float64 = self.last_entropy_[:]
var histograms []histogramCommand = self.histograms_
self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_)
if self.num_blocks_ == 0 {
/* Create first block. */
split.lengths[0] = uint32(self.block_size_)
split.types[0] = 0
last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_)
last_entropy[1] = last_entropy[0]
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_++
if self.curr_histogram_ix_ < *self.histograms_size_ {
histogramClearCommand(&histograms[self.curr_histogram_ix_])
}
self.block_size_ = 0
} else if self.block_size_ > 0 {
var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_)
var combined_histo [2]histogramCommand
var combined_entropy [2]float64
var diff [2]float64
var j uint
for j = 0; j < 2; j++ {
var last_histogram_ix uint = self.last_histogram_ix_[j]
combined_histo[j] = histograms[self.curr_histogram_ix_]
histogramAddHistogramCommand(&combined_histo[j], &histograms[last_histogram_ix])
combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_)
diff[j] = combined_entropy[j] - entropy - last_entropy[j]
}
if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
/* Create new block. */
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = byte(split.num_types)
self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = uint(byte(split.num_types))
last_entropy[1] = last_entropy[0]
last_entropy[0] = entropy
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_++
if self.curr_histogram_ix_ < *self.histograms_size_ {
histogramClearCommand(&histograms[self.curr_histogram_ix_])
}
self.block_size_ = 0
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else if diff[1] < diff[0]-20.0 {
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
/* Combine this block with second last block. */
var tmp uint = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
self.last_histogram_ix_[1] = tmp
histograms[self.last_histogram_ix_[0]] = combined_histo[1]
last_entropy[1] = last_entropy[0]
last_entropy[0] = combined_entropy[1]
self.num_blocks_++
self.block_size_ = 0
histogramClearCommand(&histograms[self.curr_histogram_ix_])
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else {
/* Combine this block with last block. */
split.lengths[self.num_blocks_-1] += uint32(self.block_size_)
histograms[self.last_histogram_ix_[0]] = combined_histo[0]
last_entropy[0] = combined_entropy[0]
if split.num_types == 1 {
last_entropy[1] = last_entropy[0]
}
self.block_size_ = 0
histogramClearCommand(&histograms[self.curr_histogram_ix_])
self.merge_last_count_++
if self.merge_last_count_ > 1 {
self.target_block_size_ += self.min_block_size_
}
}
}
if is_final {
*self.histograms_size_ = split.num_types
split.num_blocks = self.num_blocks_
}
}
/* Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block. */
func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) {
histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol)
self.block_size_++
if self.block_size_ == self.target_block_size_ {
blockSplitterFinishBlockCommand(self, false) /* is_final = false */
}
}
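/* Illustrative driving loop (not upstream code): a block splitter is fed one
   symbol at a time and finalized exactly once at the end, mirroring how
   buildMetaBlockGreedyInternal uses it for command symbols. */
func exampleSplitCommands(cmds []command, splitter *blockSplitterCommand) {
	for i := range cmds {
		blockSplitterAddSymbolCommand(splitter, uint(cmds[i].cmd_prefix_))
	}
	blockSplitterFinishBlockCommand(splitter, true) /* is_final = true */
}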


@ -0,0 +1,165 @@
package brotli
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Greedy block splitter for one block category (literal, command or distance).
*/
type blockSplitterDistance struct {
alphabet_size_ uint
min_block_size_ uint
split_threshold_ float64
num_blocks_ uint
split_ *blockSplit
histograms_ []histogramDistance
histograms_size_ *uint
target_block_size_ uint
block_size_ uint
curr_histogram_ix_ uint
last_histogram_ix_ [2]uint
last_entropy_ [2]float64
merge_last_count_ uint
}
func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramDistance, histograms_size *uint) {
var max_num_blocks uint = num_symbols/min_block_size + 1
var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1)
/* We have to allocate one more histogram than the maximum number of block
types for the current histogram when the meta-block is too big. */
self.alphabet_size_ = alphabet_size
self.min_block_size_ = min_block_size
self.split_threshold_ = split_threshold
self.num_blocks_ = 0
self.split_ = split
self.histograms_size_ = histograms_size
self.target_block_size_ = min_block_size
self.block_size_ = 0
self.curr_histogram_ix_ = 0
self.merge_last_count_ = 0
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
*histograms_size = max_num_types
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramDistance, *histograms_size)
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */
histogramClearDistance(&self.histograms_[0])
self.last_histogram_ix_[1] = 0
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does one of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool) {
var split *blockSplit = self.split_
var last_entropy []float64 = self.last_entropy_[:]
var histograms []histogramDistance = self.histograms_
self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_)
if self.num_blocks_ == 0 {
/* Create first block. */
split.lengths[0] = uint32(self.block_size_)
split.types[0] = 0
last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_)
last_entropy[1] = last_entropy[0]
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_++
if self.curr_histogram_ix_ < *self.histograms_size_ {
histogramClearDistance(&histograms[self.curr_histogram_ix_])
}
self.block_size_ = 0
} else if self.block_size_ > 0 {
var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_)
var combined_histo [2]histogramDistance
var combined_entropy [2]float64
var diff [2]float64
var j uint
for j = 0; j < 2; j++ {
var last_histogram_ix uint = self.last_histogram_ix_[j]
combined_histo[j] = histograms[self.curr_histogram_ix_]
histogramAddHistogramDistance(&combined_histo[j], &histograms[last_histogram_ix])
combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_)
diff[j] = combined_entropy[j] - entropy - last_entropy[j]
}
if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
/* Create new block. */
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = byte(split.num_types)
self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = uint(byte(split.num_types))
last_entropy[1] = last_entropy[0]
last_entropy[0] = entropy
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_++
if self.curr_histogram_ix_ < *self.histograms_size_ {
histogramClearDistance(&histograms[self.curr_histogram_ix_])
}
self.block_size_ = 0
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else if diff[1] < diff[0]-20.0 {
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
/* Combine this block with second last block. */
var tmp uint = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
self.last_histogram_ix_[1] = tmp
histograms[self.last_histogram_ix_[0]] = combined_histo[1]
last_entropy[1] = last_entropy[0]
last_entropy[0] = combined_entropy[1]
self.num_blocks_++
self.block_size_ = 0
histogramClearDistance(&histograms[self.curr_histogram_ix_])
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else {
/* Combine this block with last block. */
split.lengths[self.num_blocks_-1] += uint32(self.block_size_)
histograms[self.last_histogram_ix_[0]] = combined_histo[0]
last_entropy[0] = combined_entropy[0]
if split.num_types == 1 {
last_entropy[1] = last_entropy[0]
}
self.block_size_ = 0
histogramClearDistance(&histograms[self.curr_histogram_ix_])
self.merge_last_count_++
if self.merge_last_count_ > 1 {
self.target_block_size_ += self.min_block_size_
}
}
}
if is_final {
*self.histograms_size_ = split.num_types
split.num_blocks = self.num_blocks_
}
}
/* Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block. */
func blockSplitterAddSymbolDistance(self *blockSplitterDistance, symbol uint) {
histogramAddDistance(&self.histograms_[self.curr_histogram_ix_], symbol)
self.block_size_++
if self.block_size_ == self.target_block_size_ {
blockSplitterFinishBlockDistance(self, false) /* is_final = false */
}
}


@ -0,0 +1,165 @@
package brotli
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Greedy block splitter for one block category (literal, command or distance).
*/
type blockSplitterLiteral struct {
alphabet_size_ uint
min_block_size_ uint
split_threshold_ float64
num_blocks_ uint
split_ *blockSplit
histograms_ []histogramLiteral
histograms_size_ *uint
target_block_size_ uint
block_size_ uint
curr_histogram_ix_ uint
last_histogram_ix_ [2]uint
last_entropy_ [2]float64
merge_last_count_ uint
}
func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramLiteral, histograms_size *uint) {
var max_num_blocks uint = num_symbols/min_block_size + 1
var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1)
/* We have to allocate one more histogram than the maximum number of block
types for the current histogram when the meta-block is too big. */
self.alphabet_size_ = alphabet_size
self.min_block_size_ = min_block_size
self.split_threshold_ = split_threshold
self.num_blocks_ = 0
self.split_ = split
self.histograms_size_ = histograms_size
self.target_block_size_ = min_block_size
self.block_size_ = 0
self.curr_histogram_ix_ = 0
self.merge_last_count_ = 0
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
*histograms_size = max_num_types
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramLiteral, *histograms_size)
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */
histogramClearLiteral(&self.histograms_[0])
self.last_histogram_ix_[1] = 0
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does one of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool) {
var split *blockSplit = self.split_
var last_entropy []float64 = self.last_entropy_[:]
var histograms []histogramLiteral = self.histograms_
self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_)
if self.num_blocks_ == 0 {
/* Create first block. */
split.lengths[0] = uint32(self.block_size_)
split.types[0] = 0
last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_)
last_entropy[1] = last_entropy[0]
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_++
if self.curr_histogram_ix_ < *self.histograms_size_ {
histogramClearLiteral(&histograms[self.curr_histogram_ix_])
}
self.block_size_ = 0
} else if self.block_size_ > 0 {
var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_)
var combined_histo [2]histogramLiteral
var combined_entropy [2]float64
var diff [2]float64
var j uint
for j = 0; j < 2; j++ {
var last_histogram_ix uint = self.last_histogram_ix_[j]
combined_histo[j] = histograms[self.curr_histogram_ix_]
histogramAddHistogramLiteral(&combined_histo[j], &histograms[last_histogram_ix])
combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_)
diff[j] = combined_entropy[j] - entropy - last_entropy[j]
}
if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
/* Create new block. */
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = byte(split.num_types)
self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = uint(byte(split.num_types))
last_entropy[1] = last_entropy[0]
last_entropy[0] = entropy
self.num_blocks_++
split.num_types++
self.curr_histogram_ix_++
if self.curr_histogram_ix_ < *self.histograms_size_ {
histogramClearLiteral(&histograms[self.curr_histogram_ix_])
}
self.block_size_ = 0
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else if diff[1] < diff[0]-20.0 {
split.lengths[self.num_blocks_] = uint32(self.block_size_)
split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
/* Combine this block with second last block. */
var tmp uint = self.last_histogram_ix_[0]
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
self.last_histogram_ix_[1] = tmp
histograms[self.last_histogram_ix_[0]] = combined_histo[1]
last_entropy[1] = last_entropy[0]
last_entropy[0] = combined_entropy[1]
self.num_blocks_++
self.block_size_ = 0
histogramClearLiteral(&histograms[self.curr_histogram_ix_])
self.merge_last_count_ = 0
self.target_block_size_ = self.min_block_size_
} else {
/* Combine this block with last block. */
split.lengths[self.num_blocks_-1] += uint32(self.block_size_)
histograms[self.last_histogram_ix_[0]] = combined_histo[0]
last_entropy[0] = combined_entropy[0]
if split.num_types == 1 {
last_entropy[1] = last_entropy[0]
}
self.block_size_ = 0
histogramClearLiteral(&histograms[self.curr_histogram_ix_])
self.merge_last_count_++
if self.merge_last_count_ > 1 {
self.target_block_size_ += self.min_block_size_
}
}
}
if is_final {
*self.histograms_size_ = split.num_types
split.num_blocks = self.num_blocks_
}
}
/* Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block. */
func blockSplitterAddSymbolLiteral(self *blockSplitterLiteral, symbol uint) {
histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_], symbol)
self.block_size_++
if self.block_size_ == self.target_block_size_ {
blockSplitterFinishBlockLiteral(self, false) /* is_final = false */
}
}

37
vendor/github.com/andybalholm/brotli/params.go generated vendored Normal file

@ -0,0 +1,37 @@
package brotli
/* Copyright 2017 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Parameters for the Brotli encoder with chosen quality levels. */
type hasherParams struct {
type_ int
bucket_bits int
block_bits int
hash_len int
num_last_distances_to_check int
}
type distanceParams struct {
distance_postfix_bits uint32
num_direct_distance_codes uint32
alphabet_size uint32
max_distance uint
}
/* Encoding parameters */
type encoderParams struct {
mode int
quality int
lgwin uint
lgblock int
size_hint uint
disable_literal_context_modeling bool
large_window bool
hasher hasherParams
dist distanceParams
dictionary encoderDictionary
}

103
vendor/github.com/andybalholm/brotli/platform.go generated vendored Normal file

@ -0,0 +1,103 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func brotli_min_double(a float64, b float64) float64 {
if a < b {
return a
} else {
return b
}
}
func brotli_max_double(a float64, b float64) float64 {
if a > b {
return a
} else {
return b
}
}
func brotli_min_float(a float32, b float32) float32 {
if a < b {
return a
} else {
return b
}
}
func brotli_max_float(a float32, b float32) float32 {
if a > b {
return a
} else {
return b
}
}
func brotli_min_int(a int, b int) int {
if a < b {
return a
} else {
return b
}
}
func brotli_max_int(a int, b int) int {
if a > b {
return a
} else {
return b
}
}
func brotli_min_size_t(a uint, b uint) uint {
if a < b {
return a
} else {
return b
}
}
func brotli_max_size_t(a uint, b uint) uint {
if a > b {
return a
} else {
return b
}
}
func brotli_min_uint32_t(a uint32, b uint32) uint32 {
if a < b {
return a
} else {
return b
}
}
func brotli_max_uint32_t(a uint32, b uint32) uint32 {
if a > b {
return a
} else {
return b
}
}
func brotli_min_uint8_t(a byte, b byte) byte {
if a < b {
return a
} else {
return b
}
}
func brotli_max_uint8_t(a byte, b byte) byte {
if a > b {
return a
} else {
return b
}
}
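/* These helpers mirror the BROTLI_MIN/BROTLI_MAX C macros one type at a time.
   Illustrative sketch only (not used by the package, and it assumes a newer
   toolchain than the go 1.16 baseline): with type parameters the whole family
   collapses into a single generic pair. */
func brotliMinGeneric[T int | int32 | uint | uint32 | byte | float32 | float64](a, b T) T {
	if a < b {
		return a
	}
	return b
}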

30
vendor/github.com/andybalholm/brotli/prefix.go generated vendored Normal file

@ -0,0 +1,30 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions for encoding integers into prefix codes, the number of extra
bits, and the actual values of the extra bits. */
/* Here distance_code is an intermediate code, i.e. one of the special codes or
the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1. */
func prefixEncodeCopyDistance(distance_code uint, num_direct_codes uint, postfix_bits uint, code *uint16, extra_bits *uint32) {
if distance_code < numDistanceShortCodes+num_direct_codes {
*code = uint16(distance_code)
*extra_bits = 0
return
} else {
var dist uint = (uint(1) << (postfix_bits + 2)) + (distance_code - numDistanceShortCodes - num_direct_codes)
var bucket uint = uint(log2FloorNonZero(dist) - 1)
var postfix_mask uint = (1 << postfix_bits) - 1
var postfix uint = dist & postfix_mask
var prefix uint = (dist >> bucket) & 1
var offset uint = (2 + prefix) << bucket
var nbits uint = bucket - postfix_bits
*code = uint16(nbits<<10 | (numDistanceShortCodes + num_direct_codes + ((2*(nbits-1) + prefix) << postfix_bits) + postfix))
*extra_bits = uint32((dist - offset) >> postfix_bits)
}
}
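/* Worked example (illustrative, not upstream code): with npostfix = 0 and
   ndirect = 0, the intermediate distance code 20 falls into the else branch
   above and maps to symbol 18 with 2 extra bits of value 0, packed as
   2<<10 | 18 = 0x0812. */
func examplePrefixEncodeCopyDistance() (code uint16, extra uint32) {
	prefixEncodeCopyDistance(20, 0, 0, &code, &extra)
	return code, extra // code == 0x0812, extra == 0
}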

723
vendor/github.com/andybalholm/brotli/prefix_dec.go generated vendored Normal file

@ -0,0 +1,723 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
type cmdLutElement struct {
insert_len_extra_bits byte
copy_len_extra_bits byte
distance_code int8
context byte
insert_len_offset uint16
copy_len_offset uint16
}
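/* Illustrative sketch (not the decoder proper): a command symbol is expanded
   by adding the value of the listed number of extra bits to the table offsets;
   readBits is a hypothetical stand-in for the decoder's bit reader. A
   distance_code of 0 means "reuse the last distance", -1 means an explicit
   distance code follows in the stream. */
func exampleExpandCommand(sym uint16, readBits func(n byte) uint) (insertLen, copyLen uint) {
	v := kCmdLut[sym]
	insertLen = uint(v.insert_len_offset) + readBits(v.insert_len_extra_bits)
	copyLen = uint(v.copy_len_offset) + readBits(v.copy_len_extra_bits)
	return insertLen, copyLen
}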
var kCmdLut = [numCommandSymbols]cmdLutElement{
cmdLutElement{0x00, 0x00, 0, 0x00, 0x0000, 0x0002},
cmdLutElement{0x00, 0x00, 0, 0x01, 0x0000, 0x0003},
cmdLutElement{0x00, 0x00, 0, 0x02, 0x0000, 0x0004},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0005},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0006},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0007},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0008},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0009},
cmdLutElement{0x00, 0x00, 0, 0x00, 0x0001, 0x0002},
cmdLutElement{0x00, 0x00, 0, 0x01, 0x0001, 0x0003},
cmdLutElement{0x00, 0x00, 0, 0x02, 0x0001, 0x0004},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0005},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0006},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0007},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0008},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0009},
cmdLutElement{0x00, 0x00, 0, 0x00, 0x0002, 0x0002},
cmdLutElement{0x00, 0x00, 0, 0x01, 0x0002, 0x0003},
cmdLutElement{0x00, 0x00, 0, 0x02, 0x0002, 0x0004},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0005},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0006},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0007},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0008},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0009},
cmdLutElement{0x00, 0x00, 0, 0x00, 0x0003, 0x0002},
cmdLutElement{0x00, 0x00, 0, 0x01, 0x0003, 0x0003},
cmdLutElement{0x00, 0x00, 0, 0x02, 0x0003, 0x0004},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0005},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0006},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0007},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0008},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0009},
cmdLutElement{0x00, 0x00, 0, 0x00, 0x0004, 0x0002},
cmdLutElement{0x00, 0x00, 0, 0x01, 0x0004, 0x0003},
cmdLutElement{0x00, 0x00, 0, 0x02, 0x0004, 0x0004},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0005},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0006},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0007},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0008},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0009},
cmdLutElement{0x00, 0x00, 0, 0x00, 0x0005, 0x0002},
cmdLutElement{0x00, 0x00, 0, 0x01, 0x0005, 0x0003},
cmdLutElement{0x00, 0x00, 0, 0x02, 0x0005, 0x0004},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0005},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0006},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0007},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0008},
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0009},
cmdLutElement{0x01, 0x00, 0, 0x00, 0x0006, 0x0002},
cmdLutElement{0x01, 0x00, 0, 0x01, 0x0006, 0x0003},
cmdLutElement{0x01, 0x00, 0, 0x02, 0x0006, 0x0004},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0005},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0006},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0007},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0008},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0009},
cmdLutElement{0x01, 0x00, 0, 0x00, 0x0008, 0x0002},
cmdLutElement{0x01, 0x00, 0, 0x01, 0x0008, 0x0003},
cmdLutElement{0x01, 0x00, 0, 0x02, 0x0008, 0x0004},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0005},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0006},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0007},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0008},
cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0009},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000a},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000c},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x000e},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x0012},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x0016},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x001e},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0026},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0036},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000a},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000c},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x000e},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x0012},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x0016},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x001e},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0026},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0036},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000a},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000c},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x000e},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x0012},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x0016},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x001e},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0026},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0036},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000a},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000c},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x000e},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x0012},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x0016},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x001e},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0026},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0036},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 0x000a},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 0x000c},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x000e},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x0012},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x0016},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x001e},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0026},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0036},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000a},
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000c},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x000e},
cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x0012},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x0016},
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x001e},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0026},
cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0036},
cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000a},
cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000c},
cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x000e},
cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x0012},
cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x0016},
cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x001e},
cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0026},
cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0036},
cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000a},
cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000c},
cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x000e},
cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x0012},
cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x0016},
cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x001e},
cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0026},
cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0036},
cmdLutElement{0x00, 0x00, -1, 0x00, 0x0000, 0x0002},
cmdLutElement{0x00, 0x00, -1, 0x01, 0x0000, 0x0003},
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0000, 0x0004},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0005},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0006},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0007},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0008},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0009},
cmdLutElement{0x00, 0x00, -1, 0x00, 0x0001, 0x0002},
cmdLutElement{0x00, 0x00, -1, 0x01, 0x0001, 0x0003},
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0001, 0x0004},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0005},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0006},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0007},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0008},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0009},
cmdLutElement{0x00, 0x00, -1, 0x00, 0x0002, 0x0002},
cmdLutElement{0x00, 0x00, -1, 0x01, 0x0002, 0x0003},
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0002, 0x0004},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0005},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0006},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0007},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0008},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0009},
cmdLutElement{0x00, 0x00, -1, 0x00, 0x0003, 0x0002},
cmdLutElement{0x00, 0x00, -1, 0x01, 0x0003, 0x0003},
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0003, 0x0004},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0005},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0006},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0007},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0008},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0009},
cmdLutElement{0x00, 0x00, -1, 0x00, 0x0004, 0x0002},
cmdLutElement{0x00, 0x00, -1, 0x01, 0x0004, 0x0003},
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0004, 0x0004},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0005},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0006},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0007},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0008},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0009},
cmdLutElement{0x00, 0x00, -1, 0x00, 0x0005, 0x0002},
cmdLutElement{0x00, 0x00, -1, 0x01, 0x0005, 0x0003},
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0005, 0x0004},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0005},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0006},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0007},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0008},
cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0009},
cmdLutElement{0x01, 0x00, -1, 0x00, 0x0006, 0x0002},
cmdLutElement{0x01, 0x00, -1, 0x01, 0x0006, 0x0003},
cmdLutElement{0x01, 0x00, -1, 0x02, 0x0006, 0x0004},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0005},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0006},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0007},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0008},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0009},
cmdLutElement{0x01, 0x00, -1, 0x00, 0x0008, 0x0002},
cmdLutElement{0x01, 0x00, -1, 0x01, 0x0008, 0x0003},
cmdLutElement{0x01, 0x00, -1, 0x02, 0x0008, 0x0004},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0005},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0006},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0007},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0008},
cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0009},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000a},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000c},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x000e},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x0012},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x0016},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x001e},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0026},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0036},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000a},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000c},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x000e},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x0012},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x0016},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x001e},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0026},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0036},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000a},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000c},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x000e},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x0012},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x0016},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x001e},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0002, 0x0026},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0002, 0x0036},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000a},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000c},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x000e},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x0012},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x0016},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x001e},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0026},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0036},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000a},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000c},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0004, 0x000e},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0004, 0x0012},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x0016},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x001e},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0026},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0036},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000a},
cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000c},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x000e},
cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x0012},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x0016},
cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x001e},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0026},
cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0036},
cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000a},
cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000c},
cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x000e},
cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x0012},
cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x0016},
cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x001e},
cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0026},
cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0036},
cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000a},
cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000c},
cmdLutElement{0x01, 0x02, -1, 0x03, 0x0008, 0x000e},
cmdLutElement{0x01, 0x02, -1, 0x03, 0x0008, 0x0012},
cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x0016},
cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x001e},
cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0026},
cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0036},
cmdLutElement{0x02, 0x00, -1, 0x00, 0x000a, 0x0002},
cmdLutElement{0x02, 0x00, -1, 0x01, 0x000a, 0x0003},
cmdLutElement{0x02, 0x00, -1, 0x02, 0x000a, 0x0004},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0005},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0006},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0007},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0008},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0009},
cmdLutElement{0x02, 0x00, -1, 0x00, 0x000e, 0x0002},
cmdLutElement{0x02, 0x00, -1, 0x01, 0x000e, 0x0003},
cmdLutElement{0x02, 0x00, -1, 0x02, 0x000e, 0x0004},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0005},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0006},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0007},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0008},
cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0009},
cmdLutElement{0x03, 0x00, -1, 0x00, 0x0012, 0x0002},
cmdLutElement{0x03, 0x00, -1, 0x01, 0x0012, 0x0003},
cmdLutElement{0x03, 0x00, -1, 0x02, 0x0012, 0x0004},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0005},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0006},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0007},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0008},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0009},
cmdLutElement{0x03, 0x00, -1, 0x00, 0x001a, 0x0002},
cmdLutElement{0x03, 0x00, -1, 0x01, 0x001a, 0x0003},
cmdLutElement{0x03, 0x00, -1, 0x02, 0x001a, 0x0004},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0005},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0006},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0007},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0008},
cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0009},
cmdLutElement{0x04, 0x00, -1, 0x00, 0x0022, 0x0002},
cmdLutElement{0x04, 0x00, -1, 0x01, 0x0022, 0x0003},
cmdLutElement{0x04, 0x00, -1, 0x02, 0x0022, 0x0004},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0005},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0006},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0007},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0008},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0009},
cmdLutElement{0x04, 0x00, -1, 0x00, 0x0032, 0x0002},
cmdLutElement{0x04, 0x00, -1, 0x01, 0x0032, 0x0003},
cmdLutElement{0x04, 0x00, -1, 0x02, 0x0032, 0x0004},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0005},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0006},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0007},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0008},
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0009},
cmdLutElement{0x05, 0x00, -1, 0x00, 0x0042, 0x0002},
cmdLutElement{0x05, 0x00, -1, 0x01, 0x0042, 0x0003},
cmdLutElement{0x05, 0x00, -1, 0x02, 0x0042, 0x0004},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0005},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0006},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0007},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0008},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0009},
cmdLutElement{0x05, 0x00, -1, 0x00, 0x0062, 0x0002},
cmdLutElement{0x05, 0x00, -1, 0x01, 0x0062, 0x0003},
cmdLutElement{0x05, 0x00, -1, 0x02, 0x0062, 0x0004},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0005},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0006},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0007},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0008},
cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0009},
cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000a},
cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000c},
cmdLutElement{0x02, 0x02, -1, 0x03, 0x000a, 0x000e},
cmdLutElement{0x02, 0x02, -1, 0x03, 0x000a, 0x0012},
cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x0016},
cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x001e},
cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0026},
cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0036},
cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000a},
cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000c},
cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x000e},
cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x0012},
cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x0016},
cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x001e},
cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0026},
cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0036},
cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000a},
cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000c},
cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x000e},
cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x0012},
cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x0016},
cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x001e},
cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0026},
cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0036},
cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000a},
cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000c},
cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x000e},
cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x0012},
cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x0016},
cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x001e},
cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0026},
cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0036},
cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000a},
cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000c},
cmdLutElement{0x04, 0x02, -1, 0x03, 0x0022, 0x000e},
cmdLutElement{0x04, 0x02, -1, 0x03, 0x0022, 0x0012},
cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x0016},
cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x001e},
cmdLutElement{0x04, 0x04, -1, 0x03, 0x0022, 0x0026},
cmdLutElement{0x04, 0x04, -1, 0x03, 0x0022, 0x0036},
cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000a},
cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000c},
cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x000e},
cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x0012},
cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x0016},
cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x001e},
cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0026},
cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0036},
cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000a},
cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000c},
cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x000e},
cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x0012},
cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x0016},
cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x001e},
cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0026},
cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0036},
cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000a},
cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000c},
cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x000e},
cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x0012},
cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x0016},
cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x001e},
cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0026},
cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0036},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0046},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0066},
cmdLutElement{0x00, 0x06, -1, 0x03, 0x0000, 0x0086},
cmdLutElement{0x00, 0x07, -1, 0x03, 0x0000, 0x00c6},
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0000, 0x0146},
cmdLutElement{0x00, 0x09, -1, 0x03, 0x0000, 0x0246},
cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0000, 0x0446},
cmdLutElement{0x00, 0x18, -1, 0x03, 0x0000, 0x0846},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0046},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0066},
cmdLutElement{0x00, 0x06, -1, 0x03, 0x0001, 0x0086},
cmdLutElement{0x00, 0x07, -1, 0x03, 0x0001, 0x00c6},
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0001, 0x0146},
cmdLutElement{0x00, 0x09, -1, 0x03, 0x0001, 0x0246},
cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0001, 0x0446},
cmdLutElement{0x00, 0x18, -1, 0x03, 0x0001, 0x0846},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0046},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0066},
cmdLutElement{0x00, 0x06, -1, 0x03, 0x0002, 0x0086},
cmdLutElement{0x00, 0x07, -1, 0x03, 0x0002, 0x00c6},
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0002, 0x0146},
cmdLutElement{0x00, 0x09, -1, 0x03, 0x0002, 0x0246},
cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0002, 0x0446},
cmdLutElement{0x00, 0x18, -1, 0x03, 0x0002, 0x0846},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0046},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0066},
cmdLutElement{0x00, 0x06, -1, 0x03, 0x0003, 0x0086},
cmdLutElement{0x00, 0x07, -1, 0x03, 0x0003, 0x00c6},
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0003, 0x0146},
cmdLutElement{0x00, 0x09, -1, 0x03, 0x0003, 0x0246},
cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0003, 0x0446},
cmdLutElement{0x00, 0x18, -1, 0x03, 0x0003, 0x0846},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0046},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0066},
cmdLutElement{0x00, 0x06, -1, 0x03, 0x0004, 0x0086},
cmdLutElement{0x00, 0x07, -1, 0x03, 0x0004, 0x00c6},
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0004, 0x0146},
cmdLutElement{0x00, 0x09, -1, 0x03, 0x0004, 0x0246},
cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0004, 0x0446},
cmdLutElement{0x00, 0x18, -1, 0x03, 0x0004, 0x0846},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0046},
cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0066},
cmdLutElement{0x00, 0x06, -1, 0x03, 0x0005, 0x0086},
cmdLutElement{0x00, 0x07, -1, 0x03, 0x0005, 0x00c6},
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0005, 0x0146},
cmdLutElement{0x00, 0x09, -1, 0x03, 0x0005, 0x0246},
cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0005, 0x0446},
cmdLutElement{0x00, 0x18, -1, 0x03, 0x0005, 0x0846},
cmdLutElement{0x01, 0x05, -1, 0x03, 0x0006, 0x0046},
cmdLutElement{0x01, 0x05, -1, 0x03, 0x0006, 0x0066},
cmdLutElement{0x01, 0x06, -1, 0x03, 0x0006, 0x0086},
cmdLutElement{0x01, 0x07, -1, 0x03, 0x0006, 0x00c6},
cmdLutElement{0x01, 0x08, -1, 0x03, 0x0006, 0x0146},
cmdLutElement{0x01, 0x09, -1, 0x03, 0x0006, 0x0246},
cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0006, 0x0446},
cmdLutElement{0x01, 0x18, -1, 0x03, 0x0006, 0x0846},
cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0046},
cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0066},
cmdLutElement{0x01, 0x06, -1, 0x03, 0x0008, 0x0086},
cmdLutElement{0x01, 0x07, -1, 0x03, 0x0008, 0x00c6},
cmdLutElement{0x01, 0x08, -1, 0x03, 0x0008, 0x0146},
cmdLutElement{0x01, 0x09, -1, 0x03, 0x0008, 0x0246},
cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0008, 0x0446},
cmdLutElement{0x01, 0x18, -1, 0x03, 0x0008, 0x0846},
cmdLutElement{0x06, 0x00, -1, 0x00, 0x0082, 0x0002},
cmdLutElement{0x06, 0x00, -1, 0x01, 0x0082, 0x0003},
cmdLutElement{0x06, 0x00, -1, 0x02, 0x0082, 0x0004},
cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0005},
cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0006},
cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0007},
cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0008},
cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0009},
cmdLutElement{0x07, 0x00, -1, 0x00, 0x00c2, 0x0002},
cmdLutElement{0x07, 0x00, -1, 0x01, 0x00c2, 0x0003},
cmdLutElement{0x07, 0x00, -1, 0x02, 0x00c2, 0x0004},
cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0005},
cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0006},
cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0007},
cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0008},
cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0009},
cmdLutElement{0x08, 0x00, -1, 0x00, 0x0142, 0x0002},
cmdLutElement{0x08, 0x00, -1, 0x01, 0x0142, 0x0003},
cmdLutElement{0x08, 0x00, -1, 0x02, 0x0142, 0x0004},
cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0005},
cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0006},
cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0007},
cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0008},
cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0009},
cmdLutElement{0x09, 0x00, -1, 0x00, 0x0242, 0x0002},
cmdLutElement{0x09, 0x00, -1, 0x01, 0x0242, 0x0003},
cmdLutElement{0x09, 0x00, -1, 0x02, 0x0242, 0x0004},
cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0005},
cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0006},
cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0007},
cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0008},
cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0009},
cmdLutElement{0x0a, 0x00, -1, 0x00, 0x0442, 0x0002},
cmdLutElement{0x0a, 0x00, -1, 0x01, 0x0442, 0x0003},
cmdLutElement{0x0a, 0x00, -1, 0x02, 0x0442, 0x0004},
cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0005},
cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0006},
cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0007},
cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0008},
cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0009},
cmdLutElement{0x0c, 0x00, -1, 0x00, 0x0842, 0x0002},
cmdLutElement{0x0c, 0x00, -1, 0x01, 0x0842, 0x0003},
cmdLutElement{0x0c, 0x00, -1, 0x02, 0x0842, 0x0004},
cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0005},
cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0006},
cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0007},
cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0008},
cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0009},
cmdLutElement{0x0e, 0x00, -1, 0x00, 0x1842, 0x0002},
cmdLutElement{0x0e, 0x00, -1, 0x01, 0x1842, 0x0003},
cmdLutElement{0x0e, 0x00, -1, 0x02, 0x1842, 0x0004},
cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0005},
cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0006},
cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0007},
cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0008},
cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0009},
cmdLutElement{0x18, 0x00, -1, 0x00, 0x5842, 0x0002},
cmdLutElement{0x18, 0x00, -1, 0x01, 0x5842, 0x0003},
cmdLutElement{0x18, 0x00, -1, 0x02, 0x5842, 0x0004},
cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0005},
cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0006},
cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0007},
cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0008},
cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0009},
cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0046},
cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0066},
cmdLutElement{0x02, 0x06, -1, 0x03, 0x000a, 0x0086},
cmdLutElement{0x02, 0x07, -1, 0x03, 0x000a, 0x00c6},
cmdLutElement{0x02, 0x08, -1, 0x03, 0x000a, 0x0146},
cmdLutElement{0x02, 0x09, -1, 0x03, 0x000a, 0x0246},
cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000a, 0x0446},
cmdLutElement{0x02, 0x18, -1, 0x03, 0x000a, 0x0846},
cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0046},
cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0066},
cmdLutElement{0x02, 0x06, -1, 0x03, 0x000e, 0x0086},
cmdLutElement{0x02, 0x07, -1, 0x03, 0x000e, 0x00c6},
cmdLutElement{0x02, 0x08, -1, 0x03, 0x000e, 0x0146},
cmdLutElement{0x02, 0x09, -1, 0x03, 0x000e, 0x0246},
cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000e, 0x0446},
cmdLutElement{0x02, 0x18, -1, 0x03, 0x000e, 0x0846},
cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0046},
cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0066},
cmdLutElement{0x03, 0x06, -1, 0x03, 0x0012, 0x0086},
cmdLutElement{0x03, 0x07, -1, 0x03, 0x0012, 0x00c6},
cmdLutElement{0x03, 0x08, -1, 0x03, 0x0012, 0x0146},
cmdLutElement{0x03, 0x09, -1, 0x03, 0x0012, 0x0246},
cmdLutElement{0x03, 0x0a, -1, 0x03, 0x0012, 0x0446},
cmdLutElement{0x03, 0x18, -1, 0x03, 0x0012, 0x0846},
cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0046},
cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0066},
cmdLutElement{0x03, 0x06, -1, 0x03, 0x001a, 0x0086},
cmdLutElement{0x03, 0x07, -1, 0x03, 0x001a, 0x00c6},
cmdLutElement{0x03, 0x08, -1, 0x03, 0x001a, 0x0146},
cmdLutElement{0x03, 0x09, -1, 0x03, 0x001a, 0x0246},
cmdLutElement{0x03, 0x0a, -1, 0x03, 0x001a, 0x0446},
cmdLutElement{0x03, 0x18, -1, 0x03, 0x001a, 0x0846},
cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0046},
cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0066},
cmdLutElement{0x04, 0x06, -1, 0x03, 0x0022, 0x0086},
cmdLutElement{0x04, 0x07, -1, 0x03, 0x0022, 0x00c6},
cmdLutElement{0x04, 0x08, -1, 0x03, 0x0022, 0x0146},
cmdLutElement{0x04, 0x09, -1, 0x03, 0x0022, 0x0246},
cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0022, 0x0446},
cmdLutElement{0x04, 0x18, -1, 0x03, 0x0022, 0x0846},
cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0046},
cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0066},
cmdLutElement{0x04, 0x06, -1, 0x03, 0x0032, 0x0086},
cmdLutElement{0x04, 0x07, -1, 0x03, 0x0032, 0x00c6},
cmdLutElement{0x04, 0x08, -1, 0x03, 0x0032, 0x0146},
cmdLutElement{0x04, 0x09, -1, 0x03, 0x0032, 0x0246},
cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0032, 0x0446},
cmdLutElement{0x04, 0x18, -1, 0x03, 0x0032, 0x0846},
cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0046},
cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0066},
cmdLutElement{0x05, 0x06, -1, 0x03, 0x0042, 0x0086},
cmdLutElement{0x05, 0x07, -1, 0x03, 0x0042, 0x00c6},
cmdLutElement{0x05, 0x08, -1, 0x03, 0x0042, 0x0146},
cmdLutElement{0x05, 0x09, -1, 0x03, 0x0042, 0x0246},
cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0042, 0x0446},
cmdLutElement{0x05, 0x18, -1, 0x03, 0x0042, 0x0846},
cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0046},
cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0066},
cmdLutElement{0x05, 0x06, -1, 0x03, 0x0062, 0x0086},
cmdLutElement{0x05, 0x07, -1, 0x03, 0x0062, 0x00c6},
cmdLutElement{0x05, 0x08, -1, 0x03, 0x0062, 0x0146},
cmdLutElement{0x05, 0x09, -1, 0x03, 0x0062, 0x0246},
cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0062, 0x0446},
cmdLutElement{0x05, 0x18, -1, 0x03, 0x0062, 0x0846},
cmdLutElement{0x06, 0x01, -1, 0x03, 0x0082, 0x000a},
cmdLutElement{0x06, 0x01, -1, 0x03, 0x0082, 0x000c},
cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x000e},
cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x0012},
cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x0016},
cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x001e},
cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0026},
cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0036},
cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000a},
cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000c},
cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x000e},
cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x0012},
cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x0016},
cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x001e},
cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0026},
cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0036},
cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000a},
cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000c},
cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x000e},
cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x0012},
cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x0016},
cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x001e},
cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0026},
cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0036},
cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000a},
cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000c},
cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x000e},
cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x0012},
cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x0016},
cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x001e},
cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0026},
cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0036},
cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000a},
cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000c},
cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x000e},
cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x0012},
cmdLutElement{0x0a, 0x03, -1, 0x03, 0x0442, 0x0016},
cmdLutElement{0x0a, 0x03, -1, 0x03, 0x0442, 0x001e},
cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0026},
cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0036},
cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000a},
cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000c},
cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x000e},
cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x0012},
cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x0016},
cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x001e},
cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0026},
cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0036},
cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000a},
cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000c},
cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x000e},
cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x0012},
cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x0016},
cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x001e},
cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0026},
cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0036},
cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000a},
cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000c},
cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x000e},
cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x0012},
cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x0016},
cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x001e},
cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0026},
cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0036},
cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0046},
cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0066},
cmdLutElement{0x06, 0x06, -1, 0x03, 0x0082, 0x0086},
cmdLutElement{0x06, 0x07, -1, 0x03, 0x0082, 0x00c6},
cmdLutElement{0x06, 0x08, -1, 0x03, 0x0082, 0x0146},
cmdLutElement{0x06, 0x09, -1, 0x03, 0x0082, 0x0246},
cmdLutElement{0x06, 0x0a, -1, 0x03, 0x0082, 0x0446},
cmdLutElement{0x06, 0x18, -1, 0x03, 0x0082, 0x0846},
cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0046},
cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0066},
cmdLutElement{0x07, 0x06, -1, 0x03, 0x00c2, 0x0086},
cmdLutElement{0x07, 0x07, -1, 0x03, 0x00c2, 0x00c6},
cmdLutElement{0x07, 0x08, -1, 0x03, 0x00c2, 0x0146},
cmdLutElement{0x07, 0x09, -1, 0x03, 0x00c2, 0x0246},
cmdLutElement{0x07, 0x0a, -1, 0x03, 0x00c2, 0x0446},
cmdLutElement{0x07, 0x18, -1, 0x03, 0x00c2, 0x0846},
cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0046},
cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0066},
cmdLutElement{0x08, 0x06, -1, 0x03, 0x0142, 0x0086},
cmdLutElement{0x08, 0x07, -1, 0x03, 0x0142, 0x00c6},
cmdLutElement{0x08, 0x08, -1, 0x03, 0x0142, 0x0146},
cmdLutElement{0x08, 0x09, -1, 0x03, 0x0142, 0x0246},
cmdLutElement{0x08, 0x0a, -1, 0x03, 0x0142, 0x0446},
cmdLutElement{0x08, 0x18, -1, 0x03, 0x0142, 0x0846},
cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0046},
cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0066},
cmdLutElement{0x09, 0x06, -1, 0x03, 0x0242, 0x0086},
cmdLutElement{0x09, 0x07, -1, 0x03, 0x0242, 0x00c6},
cmdLutElement{0x09, 0x08, -1, 0x03, 0x0242, 0x0146},
cmdLutElement{0x09, 0x09, -1, 0x03, 0x0242, 0x0246},
cmdLutElement{0x09, 0x0a, -1, 0x03, 0x0242, 0x0446},
cmdLutElement{0x09, 0x18, -1, 0x03, 0x0242, 0x0846},
cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0046},
cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0066},
cmdLutElement{0x0a, 0x06, -1, 0x03, 0x0442, 0x0086},
cmdLutElement{0x0a, 0x07, -1, 0x03, 0x0442, 0x00c6},
cmdLutElement{0x0a, 0x08, -1, 0x03, 0x0442, 0x0146},
cmdLutElement{0x0a, 0x09, -1, 0x03, 0x0442, 0x0246},
cmdLutElement{0x0a, 0x0a, -1, 0x03, 0x0442, 0x0446},
cmdLutElement{0x0a, 0x18, -1, 0x03, 0x0442, 0x0846},
cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0046},
cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0066},
cmdLutElement{0x0c, 0x06, -1, 0x03, 0x0842, 0x0086},
cmdLutElement{0x0c, 0x07, -1, 0x03, 0x0842, 0x00c6},
cmdLutElement{0x0c, 0x08, -1, 0x03, 0x0842, 0x0146},
cmdLutElement{0x0c, 0x09, -1, 0x03, 0x0842, 0x0246},
cmdLutElement{0x0c, 0x0a, -1, 0x03, 0x0842, 0x0446},
cmdLutElement{0x0c, 0x18, -1, 0x03, 0x0842, 0x0846},
cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0046},
cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0066},
cmdLutElement{0x0e, 0x06, -1, 0x03, 0x1842, 0x0086},
cmdLutElement{0x0e, 0x07, -1, 0x03, 0x1842, 0x00c6},
cmdLutElement{0x0e, 0x08, -1, 0x03, 0x1842, 0x0146},
cmdLutElement{0x0e, 0x09, -1, 0x03, 0x1842, 0x0246},
cmdLutElement{0x0e, 0x0a, -1, 0x03, 0x1842, 0x0446},
cmdLutElement{0x0e, 0x18, -1, 0x03, 0x1842, 0x0846},
cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0046},
cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0066},
cmdLutElement{0x18, 0x06, -1, 0x03, 0x5842, 0x0086},
cmdLutElement{0x18, 0x07, -1, 0x03, 0x5842, 0x00c6},
cmdLutElement{0x18, 0x08, -1, 0x03, 0x5842, 0x0146},
cmdLutElement{0x18, 0x09, -1, 0x03, 0x5842, 0x0246},
cmdLutElement{0x18, 0x0a, -1, 0x03, 0x5842, 0x0446},
cmdLutElement{0x18, 0x18, -1, 0x03, 0x5842, 0x0846},
}

196
vendor/github.com/andybalholm/brotli/quality.go generated vendored Normal file

@@ -0,0 +1,196 @@
package brotli
const fastOnePassCompressionQuality = 0
const fastTwoPassCompressionQuality = 1
const zopflificationQuality = 10
const hqZopflificationQuality = 11
const maxQualityForStaticEntropyCodes = 2
const minQualityForBlockSplit = 4
const minQualityForNonzeroDistanceParams = 4
const minQualityForOptimizeHistograms = 4
const minQualityForExtensiveReferenceSearch = 5
const minQualityForContextModeling = 5
const minQualityForHqContextModeling = 7
const minQualityForHqBlockSplitting = 10
/* For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting,
so we buffer at most this many literals and commands. */
const maxNumDelayedSymbols = 0x2FFF
/* Returns hash-table size for quality levels 0 and 1. */
func maxHashTableSize(quality int) uint {
if quality == fastOnePassCompressionQuality {
return 1 << 15
} else {
return 1 << 17
}
}
/* The maximum length for which the zopflification uses distinct distances. */
const maxZopfliLenQuality10 = 150
const maxZopfliLenQuality11 = 325
/* Do not thoroughly search when a long copy is found. */
const longCopyQuickStep = 16384
func maxZopfliLen(params *encoderParams) uint {
if params.quality <= 10 {
return maxZopfliLenQuality10
} else {
return maxZopfliLenQuality11
}
}
/* Number of best candidates to evaluate to expand Zopfli chain. */
func maxZopfliCandidates(params *encoderParams) uint {
if params.quality <= 10 {
return 1
} else {
return 5
}
}
func sanitizeParams(params *encoderParams) {
params.quality = brotli_min_int(maxQuality, brotli_max_int(minQuality, params.quality))
if params.quality <= maxQualityForStaticEntropyCodes {
params.large_window = false
}
if params.lgwin < minWindowBits {
params.lgwin = minWindowBits
} else {
var max_lgwin int
if params.large_window {
max_lgwin = largeMaxWindowBits
} else {
max_lgwin = maxWindowBits
}
if params.lgwin > uint(max_lgwin) {
params.lgwin = uint(max_lgwin)
}
}
}
/* Returns optimized lg_block value. */
func computeLgBlock(params *encoderParams) int {
var lgblock int = params.lgblock
if params.quality == fastOnePassCompressionQuality || params.quality == fastTwoPassCompressionQuality {
lgblock = int(params.lgwin)
} else if params.quality < minQualityForBlockSplit {
lgblock = 14
} else if lgblock == 0 {
lgblock = 16
if params.quality >= 9 && params.lgwin > uint(lgblock) {
lgblock = brotli_min_int(18, int(params.lgwin))
}
} else {
lgblock = brotli_min_int(maxInputBlockBits, brotli_max_int(minInputBlockBits, lgblock))
}
return lgblock
}
/* Returns log2 of the size of the main ring buffer area.
Allocate at least lgwin + 1 bits for the ring buffer so that the newly
added block fits there completely and we still get lgwin bits and at least
read_block_size_bits + 1 bits, because the copy tail length needs to be
smaller than the ring-buffer size. */
func computeRbBits(params *encoderParams) int {
return 1 + brotli_max_int(int(params.lgwin), params.lgblock)
}
func maxMetablockSize(params *encoderParams) uint {
var bits int = brotli_min_int(computeRbBits(params), maxInputBlockBits)
return uint(1) << uint(bits)
}
/* When searching for backward references and no matches have been seen for a
long time, we can skip some match lookups. Unsuccessful match lookups are
very expensive, and this kind of heuristic speeds up compression quite a lot.
At first, strides of 8 bytes are taken and every second byte is fed to the
hasher. After 4x more literals, the stride grows to 16 bytes and every 4th
byte is fed to the hasher. Applied only to qualities 2 to 9. */
func literalSpreeLengthForSparseSearch(params *encoderParams) uint {
if params.quality < 9 {
return 64
} else {
return 512
}
}
func chooseHasher(params *encoderParams, hparams *hasherParams) {
if params.quality > 9 {
hparams.type_ = 10
} else if params.quality == 4 && params.size_hint >= 1<<20 {
hparams.type_ = 54
} else if params.quality < 5 {
hparams.type_ = params.quality
} else if params.lgwin <= 16 {
if params.quality < 7 {
hparams.type_ = 40
} else if params.quality < 9 {
hparams.type_ = 41
} else {
hparams.type_ = 42
}
} else if params.size_hint >= 1<<20 && params.lgwin >= 19 {
hparams.type_ = 6
hparams.block_bits = params.quality - 1
hparams.bucket_bits = 15
hparams.hash_len = 5
if params.quality < 7 {
hparams.num_last_distances_to_check = 4
} else if params.quality < 9 {
hparams.num_last_distances_to_check = 10
} else {
hparams.num_last_distances_to_check = 16
}
} else {
hparams.type_ = 5
hparams.block_bits = params.quality - 1
if params.quality < 7 {
hparams.bucket_bits = 14
} else {
hparams.bucket_bits = 15
}
if params.quality < 7 {
hparams.num_last_distances_to_check = 4
} else if params.quality < 9 {
hparams.num_last_distances_to_check = 10
} else {
hparams.num_last_distances_to_check = 16
}
}
if params.lgwin > 24 {
/* Different hashers for large window brotli: not for qualities <= 2,
these are too fast for large window. Not for qualities >= 10: their
hasher already works well with large window. So the changes are:
H3 --> H35: for quality 3.
H54 --> H55: for quality 4 with size hint > 1MB
H6 --> H65: for qualities 5, 6, 7, 8, 9. */
if hparams.type_ == 3 {
hparams.type_ = 35
}
if hparams.type_ == 54 {
hparams.type_ = 55
}
if hparams.type_ == 6 {
hparams.type_ = 65
}
}
}
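The thresholds above are what the encoder consults when a caller asks for a particular compression level: sanitizeParams clamps the requested quality and window, and chooseHasher maps the result onto a hasher type. A minimal sketch of driving these through the package's writer API follows; NewWriterLevel is assumed from the package's writer.go (outside this hunk) and the output path is purely illustrative.

package main

import (
	"log"
	"os"

	"github.com/andybalholm/brotli"
)

func main() {
	// Illustrative output file; any io.Writer works.
	f, err := os.Create("out.br")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Level 4 is the first quality with block splitting and nonzero distance
	// params (minQualityForBlockSplit above); out-of-range levels are clamped
	// by sanitizeParams.
	w := brotli.NewWriterLevel(f, 4)
	if _, err := w.Write([]byte("brotli without cgo\n")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}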

102
vendor/github.com/andybalholm/brotli/reader.go generated vendored Normal file

@@ -0,0 +1,102 @@
package brotli
import (
"errors"
"io"
)
type decodeError int
func (err decodeError) Error() string {
return "brotli: " + string(decoderErrorString(int(err)))
}
var errExcessiveInput = errors.New("brotli: excessive input")
var errInvalidState = errors.New("brotli: invalid state")
// readBufSize is a "good" buffer size that avoids excessive round-trips
// between the decoder and the underlying reader but doesn't waste too much memory on buffering.
// It is arbitrarily chosen to be equal to the constant used in io.Copy.
const readBufSize = 32 * 1024
// NewReader creates a new Reader reading from the given reader.
func NewReader(src io.Reader) *Reader {
r := new(Reader)
r.Reset(src)
return r
}
// Reset discards the Reader's state and makes it equivalent to the result of
// its original state from NewReader, but reading from src instead.
// This permits reusing a Reader rather than allocating a new one.
// The returned error is always nil.
func (r *Reader) Reset(src io.Reader) error {
decoderStateInit(r)
r.src = src
if r.buf == nil {
r.buf = make([]byte, readBufSize)
}
return nil
}
func (r *Reader) Read(p []byte) (n int, err error) {
if !decoderHasMoreOutput(r) && len(r.in) == 0 {
m, readErr := r.src.Read(r.buf)
if m == 0 {
// If readErr is `nil`, we just proxy underlying stream behavior.
return 0, readErr
}
r.in = r.buf[:m]
}
if len(p) == 0 {
return 0, nil
}
for {
var written uint
in_len := uint(len(r.in))
out_len := uint(len(p))
in_remaining := in_len
out_remaining := out_len
result := decoderDecompressStream(r, &in_remaining, &r.in, &out_remaining, &p)
written = out_len - out_remaining
n = int(written)
switch result {
case decoderResultSuccess:
if len(r.in) > 0 {
return n, errExcessiveInput
}
return n, nil
case decoderResultError:
return n, decodeError(decoderGetErrorCode(r))
case decoderResultNeedsMoreOutput:
if n == 0 {
return 0, io.ErrShortBuffer
}
return n, nil
case decoderNeedsMoreInput:
}
if len(r.in) != 0 {
return 0, errInvalidState
}
// Calling r.src.Read may block. Don't block if we have data to return.
if n > 0 {
return n, nil
}
// Top off the buffer.
encN, err := r.src.Read(r.buf)
if encN == 0 {
// Not enough data to complete decoding.
if err == io.EOF {
return 0, io.ErrUnexpectedEOF
}
return 0, err
}
r.in = r.buf[:encN]
}
}
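Since Reader satisfies io.Reader, the decompressor composes directly with the standard library. A minimal usage sketch of the API defined in this file; stdin and stdout are just illustrative endpoints.

package main

import (
	"io"
	"log"
	"os"

	"github.com/andybalholm/brotli"
)

func main() {
	// Wrap any io.Reader carrying a brotli stream; here stdin is assumed.
	r := brotli.NewReader(os.Stdin)
	// Read decompresses incrementally, so io.Copy streams the whole payload
	// without buffering it in memory.
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}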

134
vendor/github.com/andybalholm/brotli/ringbuffer.go generated vendored Normal file

@@ -0,0 +1,134 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
data in a circular manner: writing a byte writes it to:
`position() % (1 << window_bits)'.
For convenience, the ringBuffer array contains another copy of the
first `1 << tail_bits' bytes:
buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
and another copy of the last two bytes:
buffer_[-1] == buffer_[(1 << window_bits) - 1] and
buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
type ringBuffer struct {
size_ uint32
mask_ uint32
tail_size_ uint32
total_size_ uint32
cur_size_ uint32
pos_ uint32
data_ []byte
buffer_ []byte
}
func ringBufferInit(rb *ringBuffer) {
rb.pos_ = 0
}
func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
var window_bits int = computeRbBits(params)
var tail_bits int = params.lgblock
	rb.size_ = 1 << uint(window_bits)
	rb.mask_ = (1 << uint(window_bits)) - 1
	rb.tail_size_ = 1 << uint(tail_bits)
	rb.total_size_ = rb.size_ + rb.tail_size_
}
const kSlackForEightByteHashingEverywhere uint = 7
/* Allocates or re-allocates data_ to the given length plus some slack
region before and after. Fills the slack regions with zeros. */
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
var new_data []byte
var i uint
size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
if cap(rb.data_) < size {
new_data = make([]byte, size)
} else {
new_data = rb.data_[:size]
}
if rb.data_ != nil {
copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
}
rb.data_ = new_data
rb.cur_size_ = buflen
rb.buffer_ = rb.data_[2:]
rb.data_[1] = 0
rb.data_[0] = rb.data_[1]
for i = 0; i < kSlackForEightByteHashingEverywhere; i++ {
rb.buffer_[rb.cur_size_+uint32(i)] = 0
}
}
func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) {
var masked_pos uint = uint(rb.pos_ & rb.mask_)
if uint32(masked_pos) < rb.tail_size_ {
/* Just fill the tail buffer with the beginning data. */
var p uint = uint(rb.size_ + uint32(masked_pos))
copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))])
}
}
/* Push bytes into the ring buffer. */
func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) {
if rb.pos_ == 0 && uint32(n) < rb.tail_size_ {
/* Special case for the first write: to process the first block, we don't
need to allocate the whole ring-buffer and we don't need the tail
either. However, we do this memory usage optimization only if the
first write is less than the tail size, which is also the input block
size, otherwise it is likely that other blocks will follow and we
will need to reallocate to the full size anyway. */
rb.pos_ = uint32(n)
ringBufferInitBuffer(rb.pos_, rb)
copy(rb.buffer_, bytes[:n])
return
}
if rb.cur_size_ < rb.total_size_ {
/* Lazily allocate the full buffer. */
ringBufferInitBuffer(rb.total_size_, rb)
/* Initialize the last two bytes to zero, so that we don't have to worry
later when we copy the last two bytes to the first two positions. */
rb.buffer_[rb.size_-2] = 0
rb.buffer_[rb.size_-1] = 0
}
{
var masked_pos uint = uint(rb.pos_ & rb.mask_)
/* The length of the writes is limited so that we do not need to worry
about a write */
ringBufferWriteTail(bytes, n, rb)
if uint32(masked_pos+n) <= rb.size_ {
/* A single write fits. */
copy(rb.buffer_[masked_pos:], bytes[:n])
} else {
/* Split into two writes.
Copy into the end of the buffer, including the tail buffer. */
copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))])
/* Copy into the beginning of the buffer */
copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))])
}
}
{
var not_first_lap bool = rb.pos_&(1<<31) != 0
var rb_pos_mask uint32 = (1 << 31) - 1
rb.data_[0] = rb.buffer_[rb.size_-2]
rb.data_[1] = rb.buffer_[rb.size_-1]
rb.pos_ = (rb.pos_ & rb_pos_mask) + uint32(uint32(n)&rb_pos_mask)
if not_first_lap {
/* Wrap, but preserve not-a-first-lap feature. */
rb.pos_ |= 1 << 31
}
}
}
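ringBufferWrite keeps a mirrored tail (and the two bytes before position 0) so that match finding can read past the wrap point without extra branching. Below is a simplified, self-contained sketch of the same mask-and-mirror idea; it is not the library's implementation, and the sizes are deliberately tiny.

package main

import "fmt"

func main() {
	const windowBits = 4 // 16-byte window, illustrative only
	const tailBits = 2   // 4-byte mirrored tail
	size := uint32(1) << windowBits
	mask := size - 1
	buf := make([]byte, size+(1<<tailBits))

	pos := uint32(0)
	write := func(b byte) {
		p := pos & mask
		buf[p] = b
		if p < (1 << tailBits) {
			buf[size+p] = b // mirror the first bytes into the tail copy
		}
		pos++
	}
	for i := 0; i < 20; i++ { // wraps once past the 16-byte window
		write(byte('a' + i))
	}
	fmt.Printf("window=%q tail=%q\n", buf[:size], buf[size:])
}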

295
vendor/github.com/andybalholm/brotli/state.go generated vendored Normal file

@@ -0,0 +1,295 @@
package brotli
import "io"
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Brotli state for partial streaming decoding. */
const (
stateUninited = iota
stateLargeWindowBits
stateInitialize
stateMetablockBegin
stateMetablockHeader
stateMetablockHeader2
stateContextModes
stateCommandBegin
stateCommandInner
stateCommandPostDecodeLiterals
stateCommandPostWrapCopy
stateUncompressed
stateMetadata
stateCommandInnerWrite
stateMetablockDone
stateCommandPostWrite1
stateCommandPostWrite2
stateHuffmanCode0
stateHuffmanCode1
stateHuffmanCode2
stateHuffmanCode3
stateContextMap1
stateContextMap2
stateTreeGroup
stateDone
)
const (
stateMetablockHeaderNone = iota
stateMetablockHeaderEmpty
stateMetablockHeaderNibbles
stateMetablockHeaderSize
stateMetablockHeaderUncompressed
stateMetablockHeaderReserved
stateMetablockHeaderBytes
stateMetablockHeaderMetadata
)
const (
stateUncompressedNone = iota
stateUncompressedWrite
)
const (
stateTreeGroupNone = iota
stateTreeGroupLoop
)
const (
stateContextMapNone = iota
stateContextMapReadPrefix
stateContextMapHuffman
stateContextMapDecode
stateContextMapTransform
)
const (
stateHuffmanNone = iota
stateHuffmanSimpleSize
stateHuffmanSimpleRead
stateHuffmanSimpleBuild
stateHuffmanComplex
stateHuffmanLengthSymbols
)
const (
stateDecodeUint8None = iota
stateDecodeUint8Short
stateDecodeUint8Long
)
const (
stateReadBlockLengthNone = iota
stateReadBlockLengthSuffix
)
type Reader struct {
src io.Reader
buf []byte // scratch space for reading from src
in []byte // current chunk to decode; usually aliases buf
state int
loop_counter int
br bitReader
buffer struct {
u64 uint64
u8 [8]byte
}
buffer_length uint32
pos int
max_backward_distance int
max_distance int
ringbuffer_size int
ringbuffer_mask int
dist_rb_idx int
dist_rb [4]int
error_code int
sub_loop_counter uint32
ringbuffer []byte
ringbuffer_end []byte
htree_command []huffmanCode
context_lookup []byte
context_map_slice []byte
dist_context_map_slice []byte
literal_hgroup huffmanTreeGroup
insert_copy_hgroup huffmanTreeGroup
distance_hgroup huffmanTreeGroup
block_type_trees []huffmanCode
block_len_trees []huffmanCode
trivial_literal_context int
distance_context int
meta_block_remaining_len int
block_length_index uint32
block_length [3]uint32
num_block_types [3]uint32
block_type_rb [6]uint32
distance_postfix_bits uint32
num_direct_distance_codes uint32
distance_postfix_mask int
num_dist_htrees uint32
dist_context_map []byte
literal_htree []huffmanCode
dist_htree_index byte
repeat_code_len uint32
prev_code_len uint32
copy_length int
distance_code int
rb_roundtrips uint
partial_pos_out uint
symbol uint32
repeat uint32
space uint32
table [32]huffmanCode
symbol_lists symbolList
symbols_lists_array [huffmanMaxCodeLength + 1 + numCommandSymbols]uint16
next_symbol [32]int
code_length_code_lengths [codeLengthCodes]byte
code_length_histo [16]uint16
htree_index int
next []huffmanCode
context_index uint32
max_run_length_prefix uint32
code uint32
context_map_table [huffmanMaxSize272]huffmanCode
substate_metablock_header int
substate_tree_group int
substate_context_map int
substate_uncompressed int
substate_huffman int
substate_decode_uint8 int
substate_read_block_length int
is_last_metablock uint
is_uncompressed uint
is_metadata uint
should_wrap_ringbuffer uint
canny_ringbuffer_allocation uint
large_window bool
size_nibbles uint
window_bits uint32
new_ringbuffer_size int
num_literal_htrees uint32
context_map []byte
context_modes []byte
dictionary *dictionary
transforms *transforms
trivial_literal_contexts [8]uint32
}
func decoderStateInit(s *Reader) bool {
s.error_code = 0 /* BROTLI_DECODER_NO_ERROR */
initBitReader(&s.br)
s.state = stateUninited
s.large_window = false
s.substate_metablock_header = stateMetablockHeaderNone
s.substate_tree_group = stateTreeGroupNone
s.substate_context_map = stateContextMapNone
s.substate_uncompressed = stateUncompressedNone
s.substate_huffman = stateHuffmanNone
s.substate_decode_uint8 = stateDecodeUint8None
s.substate_read_block_length = stateReadBlockLengthNone
s.buffer_length = 0
s.loop_counter = 0
s.pos = 0
s.rb_roundtrips = 0
s.partial_pos_out = 0
s.block_type_trees = nil
s.block_len_trees = nil
s.ringbuffer = nil
s.ringbuffer_size = 0
s.new_ringbuffer_size = 0
s.ringbuffer_mask = 0
s.context_map = nil
s.context_modes = nil
s.dist_context_map = nil
s.context_map_slice = nil
s.dist_context_map_slice = nil
s.sub_loop_counter = 0
s.literal_hgroup.codes = nil
s.literal_hgroup.htrees = nil
s.insert_copy_hgroup.codes = nil
s.insert_copy_hgroup.htrees = nil
s.distance_hgroup.codes = nil
s.distance_hgroup.htrees = nil
s.is_last_metablock = 0
s.is_uncompressed = 0
s.is_metadata = 0
s.should_wrap_ringbuffer = 0
s.canny_ringbuffer_allocation = 1
s.window_bits = 0
s.max_distance = 0
s.dist_rb[0] = 16
s.dist_rb[1] = 15
s.dist_rb[2] = 11
s.dist_rb[3] = 4
s.dist_rb_idx = 0
s.block_type_trees = nil
s.block_len_trees = nil
s.symbol_lists.storage = s.symbols_lists_array[:]
s.symbol_lists.offset = huffmanMaxCodeLength + 1
s.dictionary = getDictionary()
s.transforms = getTransforms()
return true
}
func decoderStateMetablockBegin(s *Reader) {
s.meta_block_remaining_len = 0
s.block_length[0] = 1 << 24
s.block_length[1] = 1 << 24
s.block_length[2] = 1 << 24
s.num_block_types[0] = 1
s.num_block_types[1] = 1
s.num_block_types[2] = 1
s.block_type_rb[0] = 1
s.block_type_rb[1] = 0
s.block_type_rb[2] = 1
s.block_type_rb[3] = 0
s.block_type_rb[4] = 1
s.block_type_rb[5] = 0
s.context_map = nil
s.context_modes = nil
s.dist_context_map = nil
s.context_map_slice = nil
s.literal_htree = nil
s.dist_context_map_slice = nil
s.dist_htree_index = 0
s.context_lookup = nil
s.literal_hgroup.codes = nil
s.literal_hgroup.htrees = nil
s.insert_copy_hgroup.codes = nil
s.insert_copy_hgroup.htrees = nil
s.distance_hgroup.codes = nil
s.distance_hgroup.htrees = nil
}
func decoderStateCleanupAfterMetablock(s *Reader) {
s.context_modes = nil
s.context_map = nil
s.dist_context_map = nil
s.literal_hgroup.htrees = nil
s.insert_copy_hgroup.htrees = nil
s.distance_hgroup.htrees = nil
}
func decoderHuffmanTreeGroupInit(s *Reader, group *huffmanTreeGroup, alphabet_size uint32, max_symbol uint32, ntrees uint32) bool {
var max_table_size uint = uint(kMaxHuffmanTableSize[(alphabet_size+31)>>5])
group.alphabet_size = uint16(alphabet_size)
group.max_symbol = uint16(max_symbol)
group.num_htrees = uint16(ntrees)
group.htrees = make([][]huffmanCode, ntrees)
group.codes = make([]huffmanCode, (uint(ntrees) * max_table_size))
return !(group.codes == nil)
}
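decoderStateInit is the routine behind Reader.Reset, which means one Reader (and its scratch buffer) can be reused across independent streams. A hedged sketch of that reuse pattern follows; NewWriter is assumed from the package's writer.go, which is outside this hunk, and is only used here to produce test streams.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/andybalholm/brotli"
)

func main() {
	// Prepare two independent brotli streams (writer API assumed, see lead-in).
	var streams [][]byte
	for _, s := range []string{"first stream", "second stream"} {
		var buf bytes.Buffer
		w := brotli.NewWriter(&buf)
		if _, err := w.Write([]byte(s)); err != nil {
			log.Fatal(err)
		}
		if err := w.Close(); err != nil {
			log.Fatal(err)
		}
		streams = append(streams, buf.Bytes())
	}

	// One Reader, reset per stream: Reset calls decoderStateInit and keeps r.buf.
	r := brotli.NewReader(bytes.NewReader(streams[0]))
	for _, s := range streams {
		if err := r.Reset(bytes.NewReader(s)); err != nil {
			log.Fatal(err)
		}
		out, err := io.ReadAll(r)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(out))
	}
}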

662
vendor/github.com/andybalholm/brotli/static_dict.go generated vendored Normal file

@@ -0,0 +1,662 @@
package brotli
import "encoding/binary"
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Class to model the static dictionary. */
const maxStaticDictionaryMatchLen = 37
const kInvalidMatch uint32 = 0xFFFFFFF
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func hash(data []byte) uint32 {
var h uint32 = binary.LittleEndian.Uint32(data) * kDictHashMul32
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return h >> uint(32-kDictNumBits)
}
func addMatch(distance uint, len uint, len_code uint, matches []uint32) {
var match uint32 = uint32((distance << 5) + len_code)
matches[len] = brotli_min_uint32_t(matches[len], match)
}
func dictMatchLength(dict *dictionary, data []byte, id uint, len uint, maxlen uint) uint {
var offset uint = uint(dict.offsets_by_length[len]) + len*id
return findMatchLengthWithLimit(dict.data[offset:], data, brotli_min_size_t(uint(len), maxlen))
}
func isMatch(d *dictionary, w dictWord, data []byte, max_length uint) bool {
if uint(w.len) > max_length {
return false
} else {
var offset uint = uint(d.offsets_by_length[w.len]) + uint(w.len)*uint(w.idx)
var dict []byte = d.data[offset:]
if w.transform == 0 {
/* Match against base dictionary word. */
return findMatchLengthWithLimit(dict, data, uint(w.len)) == uint(w.len)
} else if w.transform == 10 {
/* Match against uppercase first transform.
Note that there are only ASCII uppercase words in the lookup table. */
return dict[0] >= 'a' && dict[0] <= 'z' && (dict[0]^32) == data[0] && findMatchLengthWithLimit(dict[1:], data[1:], uint(w.len)-1) == uint(w.len-1)
} else {
/* Match against uppercase all transform.
Note that there are only ASCII uppercase words in the lookup table. */
var i uint
for i = 0; i < uint(w.len); i++ {
if dict[i] >= 'a' && dict[i] <= 'z' {
if (dict[i] ^ 32) != data[i] {
return false
}
} else {
if dict[i] != data[i] {
return false
}
}
}
return true
}
}
}
func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_length uint, max_length uint, matches []uint32) bool {
var has_found_match bool = false
{
var offset uint = uint(dict.buckets[hash(data)])
var end bool = offset == 0
for !end {
w := dict.dict_words[offset]
offset++
var l uint = uint(w.len) & 0x1F
var n uint = uint(1) << dict.words.size_bits_by_length[l]
var id uint = uint(w.idx)
end = !(w.len&0x80 == 0)
w.len = byte(l)
if w.transform == 0 {
var matchlen uint = dictMatchLength(dict.words, data, id, l, max_length)
var s []byte
var minlen uint
var maxlen uint
var len uint
/* Transform "" + BROTLI_TRANSFORM_IDENTITY + "" */
if matchlen == l {
addMatch(id, l, l, matches)
has_found_match = true
}
/* Transforms "" + BROTLI_TRANSFORM_OMIT_LAST_1 + "" and
"" + BROTLI_TRANSFORM_OMIT_LAST_1 + "ing " */
if matchlen >= l-1 {
addMatch(id+12*n, l-1, l, matches)
if l+2 < max_length && data[l-1] == 'i' && data[l] == 'n' && data[l+1] == 'g' && data[l+2] == ' ' {
addMatch(id+49*n, l+3, l, matches)
}
has_found_match = true
}
/* Transform "" + BROTLI_TRANSFORM_OMIT_LAST_# + "" (# = 2 .. 9) */
minlen = min_length
if l > 9 {
minlen = brotli_max_size_t(minlen, l-9)
}
maxlen = brotli_min_size_t(matchlen, l-2)
for len = minlen; len <= maxlen; len++ {
var cut uint = l - len
var transform_id uint = (cut << 2) + uint((dict.cutoffTransforms>>(cut*6))&0x3F)
addMatch(id+transform_id*n, uint(len), l, matches)
has_found_match = true
}
if matchlen < l || l+6 >= max_length {
continue
}
s = data[l:]
/* Transforms "" + BROTLI_TRANSFORM_IDENTITY + <suffix> */
if s[0] == ' ' {
addMatch(id+n, l+1, l, matches)
if s[1] == 'a' {
if s[2] == ' ' {
addMatch(id+28*n, l+3, l, matches)
} else if s[2] == 's' {
if s[3] == ' ' {
addMatch(id+46*n, l+4, l, matches)
}
} else if s[2] == 't' {
if s[3] == ' ' {
addMatch(id+60*n, l+4, l, matches)
}
} else if s[2] == 'n' {
if s[3] == 'd' && s[4] == ' ' {
addMatch(id+10*n, l+5, l, matches)
}
}
} else if s[1] == 'b' {
if s[2] == 'y' && s[3] == ' ' {
addMatch(id+38*n, l+4, l, matches)
}
} else if s[1] == 'i' {
if s[2] == 'n' {
if s[3] == ' ' {
addMatch(id+16*n, l+4, l, matches)
}
} else if s[2] == 's' {
if s[3] == ' ' {
addMatch(id+47*n, l+4, l, matches)
}
}
} else if s[1] == 'f' {
if s[2] == 'o' {
if s[3] == 'r' && s[4] == ' ' {
addMatch(id+25*n, l+5, l, matches)
}
} else if s[2] == 'r' {
if s[3] == 'o' && s[4] == 'm' && s[5] == ' ' {
addMatch(id+37*n, l+6, l, matches)
}
}
} else if s[1] == 'o' {
if s[2] == 'f' {
if s[3] == ' ' {
addMatch(id+8*n, l+4, l, matches)
}
} else if s[2] == 'n' {
if s[3] == ' ' {
addMatch(id+45*n, l+4, l, matches)
}
}
} else if s[1] == 'n' {
if s[2] == 'o' && s[3] == 't' && s[4] == ' ' {
addMatch(id+80*n, l+5, l, matches)
}
} else if s[1] == 't' {
if s[2] == 'h' {
if s[3] == 'e' {
if s[4] == ' ' {
addMatch(id+5*n, l+5, l, matches)
}
} else if s[3] == 'a' {
if s[4] == 't' && s[5] == ' ' {
addMatch(id+29*n, l+6, l, matches)
}
}
} else if s[2] == 'o' {
if s[3] == ' ' {
addMatch(id+17*n, l+4, l, matches)
}
}
} else if s[1] == 'w' {
if s[2] == 'i' && s[3] == 't' && s[4] == 'h' && s[5] == ' ' {
addMatch(id+35*n, l+6, l, matches)
}
}
} else if s[0] == '"' {
addMatch(id+19*n, l+1, l, matches)
if s[1] == '>' {
addMatch(id+21*n, l+2, l, matches)
}
} else if s[0] == '.' {
addMatch(id+20*n, l+1, l, matches)
if s[1] == ' ' {
addMatch(id+31*n, l+2, l, matches)
if s[2] == 'T' && s[3] == 'h' {
if s[4] == 'e' {
if s[5] == ' ' {
addMatch(id+43*n, l+6, l, matches)
}
} else if s[4] == 'i' {
if s[5] == 's' && s[6] == ' ' {
addMatch(id+75*n, l+7, l, matches)
}
}
}
}
} else if s[0] == ',' {
addMatch(id+76*n, l+1, l, matches)
if s[1] == ' ' {
addMatch(id+14*n, l+2, l, matches)
}
} else if s[0] == '\n' {
addMatch(id+22*n, l+1, l, matches)
if s[1] == '\t' {
addMatch(id+50*n, l+2, l, matches)
}
} else if s[0] == ']' {
addMatch(id+24*n, l+1, l, matches)
} else if s[0] == '\'' {
addMatch(id+36*n, l+1, l, matches)
} else if s[0] == ':' {
addMatch(id+51*n, l+1, l, matches)
} else if s[0] == '(' {
addMatch(id+57*n, l+1, l, matches)
} else if s[0] == '=' {
if s[1] == '"' {
addMatch(id+70*n, l+2, l, matches)
} else if s[1] == '\'' {
addMatch(id+86*n, l+2, l, matches)
}
} else if s[0] == 'a' {
if s[1] == 'l' && s[2] == ' ' {
addMatch(id+84*n, l+3, l, matches)
}
} else if s[0] == 'e' {
if s[1] == 'd' {
if s[2] == ' ' {
addMatch(id+53*n, l+3, l, matches)
}
} else if s[1] == 'r' {
if s[2] == ' ' {
addMatch(id+82*n, l+3, l, matches)
}
} else if s[1] == 's' {
if s[2] == 't' && s[3] == ' ' {
addMatch(id+95*n, l+4, l, matches)
}
}
} else if s[0] == 'f' {
if s[1] == 'u' && s[2] == 'l' && s[3] == ' ' {
addMatch(id+90*n, l+4, l, matches)
}
} else if s[0] == 'i' {
if s[1] == 'v' {
if s[2] == 'e' && s[3] == ' ' {
addMatch(id+92*n, l+4, l, matches)
}
} else if s[1] == 'z' {
if s[2] == 'e' && s[3] == ' ' {
addMatch(id+100*n, l+4, l, matches)
}
}
} else if s[0] == 'l' {
if s[1] == 'e' {
if s[2] == 's' && s[3] == 's' && s[4] == ' ' {
addMatch(id+93*n, l+5, l, matches)
}
} else if s[1] == 'y' {
if s[2] == ' ' {
addMatch(id+61*n, l+3, l, matches)
}
}
} else if s[0] == 'o' {
if s[1] == 'u' && s[2] == 's' && s[3] == ' ' {
addMatch(id+106*n, l+4, l, matches)
}
}
} else {
var is_all_caps bool = (w.transform != transformUppercaseFirst)
/* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and
is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL)
transform. */
var s []byte
if !isMatch(dict.words, w, data, max_length) {
continue
}
/* Transform "" + kUppercase{First,All} + "" */
var tmp int
if is_all_caps {
tmp = 44
} else {
tmp = 9
}
addMatch(id+uint(tmp)*n, l, l, matches)
has_found_match = true
if l+1 >= max_length {
continue
}
/* Transforms "" + kUppercase{First,All} + <suffix> */
s = data[l:]
if s[0] == ' ' {
var tmp int
if is_all_caps {
tmp = 68
} else {
tmp = 4
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
} else if s[0] == '"' {
var tmp int
if is_all_caps {
tmp = 87
} else {
tmp = 66
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
if s[1] == '>' {
var tmp int
if is_all_caps {
tmp = 97
} else {
tmp = 69
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
}
} else if s[0] == '.' {
var tmp int
if is_all_caps {
tmp = 101
} else {
tmp = 79
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
if s[1] == ' ' {
var tmp int
if is_all_caps {
tmp = 114
} else {
tmp = 88
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
}
} else if s[0] == ',' {
var tmp int
if is_all_caps {
tmp = 112
} else {
tmp = 99
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
if s[1] == ' ' {
var tmp int
if is_all_caps {
tmp = 107
} else {
tmp = 58
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
}
} else if s[0] == '\'' {
var tmp int
if is_all_caps {
tmp = 94
} else {
tmp = 74
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
} else if s[0] == '(' {
var tmp int
if is_all_caps {
tmp = 113
} else {
tmp = 78
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
} else if s[0] == '=' {
if s[1] == '"' {
var tmp int
if is_all_caps {
tmp = 105
} else {
tmp = 104
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
} else if s[1] == '\'' {
var tmp int
if is_all_caps {
tmp = 116
} else {
tmp = 108
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
}
}
}
}
}
/* Transforms with prefixes " " and "." */
if max_length >= 5 && (data[0] == ' ' || data[0] == '.') {
var is_space bool = (data[0] == ' ')
var offset uint = uint(dict.buckets[hash(data[1:])])
var end bool = offset == 0
for !end {
w := dict.dict_words[offset]
offset++
var l uint = uint(w.len) & 0x1F
var n uint = uint(1) << dict.words.size_bits_by_length[l]
var id uint = uint(w.idx)
end = !(w.len&0x80 == 0)
w.len = byte(l)
if w.transform == 0 {
var s []byte
if !isMatch(dict.words, w, data[1:], max_length-1) {
continue
}
/* Transforms " " + BROTLI_TRANSFORM_IDENTITY + "" and
"." + BROTLI_TRANSFORM_IDENTITY + "" */
var tmp int
if is_space {
tmp = 6
} else {
tmp = 32
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
has_found_match = true
if l+2 >= max_length {
continue
}
/* Transforms " " + BROTLI_TRANSFORM_IDENTITY + <suffix> and
"." + BROTLI_TRANSFORM_IDENTITY + <suffix>
*/
s = data[l+1:]
if s[0] == ' ' {
var tmp int
if is_space {
tmp = 2
} else {
tmp = 77
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
} else if s[0] == '(' {
var tmp int
if is_space {
tmp = 89
} else {
tmp = 67
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
} else if is_space {
if s[0] == ',' {
addMatch(id+103*n, l+2, l, matches)
if s[1] == ' ' {
addMatch(id+33*n, l+3, l, matches)
}
} else if s[0] == '.' {
addMatch(id+71*n, l+2, l, matches)
if s[1] == ' ' {
addMatch(id+52*n, l+3, l, matches)
}
} else if s[0] == '=' {
if s[1] == '"' {
addMatch(id+81*n, l+3, l, matches)
} else if s[1] == '\'' {
addMatch(id+98*n, l+3, l, matches)
}
}
}
} else if is_space {
var is_all_caps bool = (w.transform != transformUppercaseFirst)
/* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and
is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL)
transform. */
var s []byte
if !isMatch(dict.words, w, data[1:], max_length-1) {
continue
}
/* Transforms " " + kUppercase{First,All} + "" */
var tmp int
if is_all_caps {
tmp = 85
} else {
tmp = 30
}
addMatch(id+uint(tmp)*n, l+1, l, matches)
has_found_match = true
if l+2 >= max_length {
continue
}
/* Transforms " " + kUppercase{First,All} + <suffix> */
s = data[l+1:]
if s[0] == ' ' {
var tmp int
if is_all_caps {
tmp = 83
} else {
tmp = 15
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
} else if s[0] == ',' {
if !is_all_caps {
addMatch(id+109*n, l+2, l, matches)
}
if s[1] == ' ' {
var tmp int
if is_all_caps {
tmp = 111
} else {
tmp = 65
}
addMatch(id+uint(tmp)*n, l+3, l, matches)
}
} else if s[0] == '.' {
var tmp int
if is_all_caps {
tmp = 115
} else {
tmp = 96
}
addMatch(id+uint(tmp)*n, l+2, l, matches)
if s[1] == ' ' {
var tmp int
if is_all_caps {
tmp = 117
} else {
tmp = 91
}
addMatch(id+uint(tmp)*n, l+3, l, matches)
}
} else if s[0] == '=' {
if s[1] == '"' {
var tmp int
if is_all_caps {
tmp = 110
} else {
tmp = 118
}
addMatch(id+uint(tmp)*n, l+3, l, matches)
} else if s[1] == '\'' {
var tmp int
if is_all_caps {
tmp = 119
} else {
tmp = 120
}
addMatch(id+uint(tmp)*n, l+3, l, matches)
}
}
}
}
}
if max_length >= 6 {
/* Transforms with prefixes "e ", "s ", ", " and "\xC2\xA0" */
if (data[1] == ' ' && (data[0] == 'e' || data[0] == 's' || data[0] == ',')) || (data[0] == 0xC2 && data[1] == 0xA0) {
var offset uint = uint(dict.buckets[hash(data[2:])])
var end bool = offset == 0
for !end {
w := dict.dict_words[offset]
offset++
var l uint = uint(w.len) & 0x1F
var n uint = uint(1) << dict.words.size_bits_by_length[l]
var id uint = uint(w.idx)
end = !(w.len&0x80 == 0)
w.len = byte(l)
if w.transform == 0 && isMatch(dict.words, w, data[2:], max_length-2) {
if data[0] == 0xC2 {
addMatch(id+102*n, l+2, l, matches)
has_found_match = true
} else if l+2 < max_length && data[l+2] == ' ' {
var t uint = 13
if data[0] == 'e' {
t = 18
} else if data[0] == 's' {
t = 7
}
addMatch(id+t*n, l+3, l, matches)
has_found_match = true
}
}
}
}
}
if max_length >= 9 {
/* Transforms with prefixes " the " and ".com/" */
if (data[0] == ' ' && data[1] == 't' && data[2] == 'h' && data[3] == 'e' && data[4] == ' ') || (data[0] == '.' && data[1] == 'c' && data[2] == 'o' && data[3] == 'm' && data[4] == '/') {
var offset uint = uint(dict.buckets[hash(data[5:])])
var end bool = offset == 0
for !end {
w := dict.dict_words[offset]
offset++
var l uint = uint(w.len) & 0x1F
var n uint = uint(1) << dict.words.size_bits_by_length[l]
var id uint = uint(w.idx)
end = !(w.len&0x80 == 0)
w.len = byte(l)
if w.transform == 0 && isMatch(dict.words, w, data[5:], max_length-5) {
var tmp int
if data[0] == ' ' {
tmp = 41
} else {
tmp = 72
}
addMatch(id+uint(tmp)*n, l+5, l, matches)
has_found_match = true
if l+5 < max_length {
var s []byte = data[l+5:]
if data[0] == ' ' {
if l+8 < max_length && s[0] == ' ' && s[1] == 'o' && s[2] == 'f' && s[3] == ' ' {
addMatch(id+62*n, l+9, l, matches)
if l+12 < max_length && s[4] == 't' && s[5] == 'h' && s[6] == 'e' && s[7] == ' ' {
addMatch(id+73*n, l+13, l, matches)
}
}
}
}
}
}
}
}
return has_found_match
}
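The bucket lookups above all start from the multiplicative hash defined near the top of this file. A standalone sketch of that hashing scheme follows; the multiplier and bit count are assumptions mirroring the reference brotli constants (kDictHashMul32, kDictNumBits), which are defined elsewhere in the vendored package.

package main

import (
	"encoding/binary"
	"fmt"
)

// Assumed stand-ins for kDictHashMul32 and kDictNumBits; the vendored package
// defines the real constants in another file.
const (
	dictHashMul32 uint32 = 0x1E35A7BD
	dictNumBits          = 15
)

// dictHash mirrors hash() above: multiply the first four little-endian bytes
// and keep the high bits, which carry the most mixture from the multiplication.
func dictHash(data []byte) uint32 {
	h := binary.LittleEndian.Uint32(data) * dictHashMul32
	return h >> (32 - dictNumBits)
}

func main() {
	fmt.Println(dictHash([]byte(" the quick brown fox")))
}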

75094
vendor/github.com/andybalholm/brotli/static_dict_lut.go generated vendored Normal file

File diff suppressed because it is too large.

22
vendor/github.com/andybalholm/brotli/symbol_list.go generated vendored Normal file

@@ -0,0 +1,22 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Utilities for building Huffman decoding tables. */
type symbolList struct {
storage []uint16
offset int
}
func symbolListGet(sl symbolList, i int) uint16 {
return sl.storage[i+sl.offset]
}
func symbolListPut(sl symbolList, i int, val uint16) {
sl.storage[i+sl.offset] = val
}

641
vendor/github.com/andybalholm/brotli/transform.go generated vendored Normal file

@@ -0,0 +1,641 @@
package brotli
const (
transformIdentity = 0
transformOmitLast1 = 1
transformOmitLast2 = 2
transformOmitLast3 = 3
transformOmitLast4 = 4
transformOmitLast5 = 5
transformOmitLast6 = 6
transformOmitLast7 = 7
transformOmitLast8 = 8
transformOmitLast9 = 9
transformUppercaseFirst = 10
transformUppercaseAll = 11
transformOmitFirst1 = 12
transformOmitFirst2 = 13
transformOmitFirst3 = 14
transformOmitFirst4 = 15
transformOmitFirst5 = 16
transformOmitFirst6 = 17
transformOmitFirst7 = 18
transformOmitFirst8 = 19
transformOmitFirst9 = 20
transformShiftFirst = 21
transformShiftAll = 22 + iota - 22
numTransformTypes
)
const transformsMaxCutOff = transformOmitLast9
type transforms struct {
prefix_suffix_size uint16
prefix_suffix []byte
prefix_suffix_map []uint16
num_transforms uint32
transforms []byte
params []byte
cutOffTransforms [transformsMaxCutOff + 1]int16
}
func transformPrefixId(t *transforms, I int) byte {
return t.transforms[(I*3)+0]
}
func transformType(t *transforms, I int) byte {
return t.transforms[(I*3)+1]
}
func transformSuffixId(t *transforms, I int) byte {
return t.transforms[(I*3)+2]
}
func transformPrefix(t *transforms, I int) []byte {
return t.prefix_suffix[t.prefix_suffix_map[transformPrefixId(t, I)]:]
}
func transformSuffix(t *transforms, I int) []byte {
return t.prefix_suffix[t.prefix_suffix_map[transformSuffixId(t, I)]:]
}
/* RFC 7932 transforms string data */
const kPrefixSuffix string = "\001 \002, \010 of the \004 of \002s \001.\005 and \004 " + "in \001\"\004 to \002\">\001\n\002. \001]\005 for \003 a \006 " + "that \001'\006 with \006 from \004 by \001(\006. T" + "he \004 on \004 as \004 is \004ing \002\n\t\001:\003ed " + "\002=\"\004 at \003ly \001,\002='\005.com/\007. This \005" + " not \003er \003al \004ful \004ive \005less \004es" + "t \004ize \002\xc2\xa0\004ous \005 the \002e \000"
var kPrefixSuffixMap = [50]uint16{
0x00,
0x02,
0x05,
0x0E,
0x13,
0x16,
0x18,
0x1E,
0x23,
0x25,
0x2A,
0x2D,
0x2F,
0x32,
0x34,
0x3A,
0x3E,
0x45,
0x47,
0x4E,
0x55,
0x5A,
0x5C,
0x63,
0x68,
0x6D,
0x72,
0x77,
0x7A,
0x7C,
0x80,
0x83,
0x88,
0x8C,
0x8E,
0x91,
0x97,
0x9F,
0xA5,
0xA9,
0xAD,
0xB2,
0xB7,
0xBD,
0xC2,
0xC7,
0xCA,
0xCF,
0xD5,
0xD8,
}
/* RFC 7932 transforms */
var kTransformsData = []byte{
49,
transformIdentity,
49,
49,
transformIdentity,
0,
0,
transformIdentity,
0,
49,
transformOmitFirst1,
49,
49,
transformUppercaseFirst,
0,
49,
transformIdentity,
47,
0,
transformIdentity,
49,
4,
transformIdentity,
0,
49,
transformIdentity,
3,
49,
transformUppercaseFirst,
49,
49,
transformIdentity,
6,
49,
transformOmitFirst2,
49,
49,
transformOmitLast1,
49,
1,
transformIdentity,
0,
49,
transformIdentity,
1,
0,
transformUppercaseFirst,
0,
49,
transformIdentity,
7,
49,
transformIdentity,
9,
48,
transformIdentity,
0,
49,
transformIdentity,
8,
49,
transformIdentity,
5,
49,
transformIdentity,
10,
49,
transformIdentity,
11,
49,
transformOmitLast3,
49,
49,
transformIdentity,
13,
49,
transformIdentity,
14,
49,
transformOmitFirst3,
49,
49,
transformOmitLast2,
49,
49,
transformIdentity,
15,
49,
transformIdentity,
16,
0,
transformUppercaseFirst,
49,
49,
transformIdentity,
12,
5,
transformIdentity,
49,
0,
transformIdentity,
1,
49,
transformOmitFirst4,
49,
49,
transformIdentity,
18,
49,
transformIdentity,
17,
49,
transformIdentity,
19,
49,
transformIdentity,
20,
49,
transformOmitFirst5,
49,
49,
transformOmitFirst6,
49,
47,
transformIdentity,
49,
49,
transformOmitLast4,
49,
49,
transformIdentity,
22,
49,
transformUppercaseAll,
49,
49,
transformIdentity,
23,
49,
transformIdentity,
24,
49,
transformIdentity,
25,
49,
transformOmitLast7,
49,
49,
transformOmitLast1,
26,
49,
transformIdentity,
27,
49,
transformIdentity,
28,
0,
transformIdentity,
12,
49,
transformIdentity,
29,
49,
transformOmitFirst9,
49,
49,
transformOmitFirst7,
49,
49,
transformOmitLast6,
49,
49,
transformIdentity,
21,
49,
transformUppercaseFirst,
1,
49,
transformOmitLast8,
49,
49,
transformIdentity,
31,
49,
transformIdentity,
32,
47,
transformIdentity,
3,
49,
transformOmitLast5,
49,
49,
transformOmitLast9,
49,
0,
transformUppercaseFirst,
1,
49,
transformUppercaseFirst,
8,
5,
transformIdentity,
21,
49,
transformUppercaseAll,
0,
49,
transformUppercaseFirst,
10,
49,
transformIdentity,
30,
0,
transformIdentity,
5,
35,
transformIdentity,
49,
47,
transformIdentity,
2,
49,
transformUppercaseFirst,
17,
49,
transformIdentity,
36,
49,
transformIdentity,
33,
5,
transformIdentity,
0,
49,
transformUppercaseFirst,
21,
49,
transformUppercaseFirst,
5,
49,
transformIdentity,
37,
0,
transformIdentity,
30,
49,
transformIdentity,
38,
0,
transformUppercaseAll,
0,
49,
transformIdentity,
39,
0,
transformUppercaseAll,
49,
49,
transformIdentity,
34,
49,
transformUppercaseAll,
8,
49,
transformUppercaseFirst,
12,
0,
transformIdentity,
21,
49,
transformIdentity,
40,
0,
transformUppercaseFirst,
12,
49,
transformIdentity,
41,
49,
transformIdentity,
42,
49,
transformUppercaseAll,
17,
49,
transformIdentity,
43,
0,
transformUppercaseFirst,
5,
49,
transformUppercaseAll,
10,
0,
transformIdentity,
34,
49,
transformUppercaseFirst,
33,
49,
transformIdentity,
44,
49,
transformUppercaseAll,
5,
45,
transformIdentity,
49,
0,
transformIdentity,
33,
49,
transformUppercaseFirst,
30,
49,
transformUppercaseAll,
30,
49,
transformIdentity,
46,
49,
transformUppercaseAll,
1,
49,
transformUppercaseFirst,
34,
0,
transformUppercaseFirst,
33,
0,
transformUppercaseAll,
30,
0,
transformUppercaseAll,
1,
49,
transformUppercaseAll,
33,
49,
transformUppercaseAll,
21,
49,
transformUppercaseAll,
12,
0,
transformUppercaseAll,
5,
49,
transformUppercaseAll,
34,
0,
transformUppercaseAll,
12,
0,
transformUppercaseFirst,
30,
0,
transformUppercaseAll,
34,
0,
transformUppercaseFirst,
34,
}
var kBrotliTransforms = transforms{
217,
[]byte(kPrefixSuffix),
kPrefixSuffixMap[:],
121,
kTransformsData,
nil, /* no extra parameters */
[transformsMaxCutOff + 1]int16{0, 12, 27, 23, 42, 63, 56, 48, 59, 64},
}
func getTransforms() *transforms {
return &kBrotliTransforms
}
func toUpperCase(p []byte) int {
if p[0] < 0xC0 {
if p[0] >= 'a' && p[0] <= 'z' {
p[0] ^= 32
}
return 1
}
/* An overly simplified uppercasing model for UTF-8. */
if p[0] < 0xE0 {
p[1] ^= 32
return 2
}
/* An arbitrary transform for three byte characters. */
p[2] ^= 5
return 3
}
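For ASCII input this simply flips bit 5 of the first byte; the multi-byte branches are deliberately crude, as the comments above note. A tiny in-package sketch (the sample byte is illustrative):

	b := []byte("q")
	n := toUpperCase(b)
	// n == 1, b[0] == 'Q' ('q' XOR 32)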
func shiftTransform(word []byte, word_len int, parameter uint16) int {
/* Limited sign extension: scalar < (1 << 24). */
var scalar uint32 = (uint32(parameter) & 0x7FFF) + (0x1000000 - (uint32(parameter) & 0x8000))
if word[0] < 0x80 {
/* 1-byte rune / 0sssssss / 7 bit scalar (ASCII). */
scalar += uint32(word[0])
word[0] = byte(scalar & 0x7F)
return 1
} else if word[0] < 0xC0 {
/* Continuation / 10AAAAAA. */
return 1
} else if word[0] < 0xE0 {
/* 2-byte rune / 110sssss AAssssss / 11 bit scalar. */
if word_len < 2 {
return 1
}
scalar += uint32(word[1]&0x3F | (word[0]&0x1F)<<6)
word[0] = byte(0xC0 | (scalar>>6)&0x1F)
word[1] = byte(uint32(word[1]&0xC0) | scalar&0x3F)
return 2
} else if word[0] < 0xF0 {
/* 3-byte rune / 1110ssss AAssssss BBssssss / 16 bit scalar. */
if word_len < 3 {
return word_len
}
scalar += uint32(word[2])&0x3F | uint32(word[1]&0x3F)<<6 | uint32(word[0]&0x0F)<<12
word[0] = byte(0xE0 | (scalar>>12)&0x0F)
word[1] = byte(uint32(word[1]&0xC0) | (scalar>>6)&0x3F)
word[2] = byte(uint32(word[2]&0xC0) | scalar&0x3F)
return 3
} else if word[0] < 0xF8 {
/* 4-byte rune / 11110sss AAssssss BBssssss CCssssss / 21 bit scalar. */
if word_len < 4 {
return word_len
}
scalar += uint32(word[3])&0x3F | uint32(word[2]&0x3F)<<6 | uint32(word[1]&0x3F)<<12 | uint32(word[0]&0x07)<<18
word[0] = byte(0xF0 | (scalar>>18)&0x07)
word[1] = byte(uint32(word[1]&0xC0) | (scalar>>12)&0x3F)
word[2] = byte(uint32(word[2]&0xC0) | (scalar>>6)&0x3F)
word[3] = byte(uint32(word[3]&0xC0) | scalar&0x3F)
return 4
}
return 1
}
func transformDictionaryWord(dst []byte, word []byte, len int, trans *transforms, transform_idx int) int {
var idx int = 0
var prefix []byte = transformPrefix(trans, transform_idx)
var type_ byte = transformType(trans, transform_idx)
var suffix []byte = transformSuffix(trans, transform_idx)
{
var prefix_len int = int(prefix[0])
prefix = prefix[1:]
for {
tmp1 := prefix_len
prefix_len--
if tmp1 == 0 {
break
}
dst[idx] = prefix[0]
idx++
prefix = prefix[1:]
}
}
{
var t int = int(type_)
var i int = 0
if t <= transformOmitLast9 {
len -= t
} else if t >= transformOmitFirst1 && t <= transformOmitFirst9 {
var skip int = t - (transformOmitFirst1 - 1)
word = word[skip:]
len -= skip
}
for i < len {
dst[idx] = word[i]
idx++
i++
}
if t == transformUppercaseFirst {
toUpperCase(dst[idx-len:])
} else if t == transformUppercaseAll {
var uppercase []byte = dst
uppercase = uppercase[idx-len:]
for len > 0 {
var step int = toUpperCase(uppercase)
uppercase = uppercase[step:]
len -= step
}
} else if t == transformShiftFirst {
var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8
shiftTransform(dst[idx-len:], int(len), param)
} else if t == transformShiftAll {
var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8
var shift []byte = dst
shift = shift[idx-len:]
for len > 0 {
var step int = shiftTransform(shift, int(len), param)
shift = shift[step:]
len -= step
}
}
}
{
var suffix_len int = int(suffix[0])
suffix = suffix[1:]
for {
tmp2 := suffix_len
suffix_len--
if tmp2 == 0 {
break
}
dst[idx] = suffix[0]
idx++
suffix = suffix[1:]
}
return idx
}
}
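Putting the tables and the routine together: a transform index selects one (prefix, type, suffix) triple from kTransformsData, and transformDictionaryWord writes prefix + transformed word + suffix into dst, returning the number of bytes written. A minimal in-package sketch (the sample word and the scratch-buffer size are illustrative assumptions):

	word := []byte("example")
	dst := make([]byte, len(word)+32) // generous room for any prefix/suffix

	// Transform 0 is ("", Identity, ""): the word is copied unchanged.
	n := transformDictionaryWord(dst, word, len(word), getTransforms(), 0)
	// dst[:n] == "example"

	// Transform 9 is ("", UppercaseFirst, ""): only the first byte changes.
	n = transformDictionaryWord(dst, word, len(word), getTransforms(), 9)
	// dst[:n] == "Example"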

70
vendor/github.com/andybalholm/brotli/utf8_util.go generated vendored Normal file

@ -0,0 +1,70 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Heuristics for deciding about the UTF8-ness of strings. */
const kMinUTF8Ratio float64 = 0.75
/* Returns 1 if at least min_fraction of the bytes between pos and
pos + length in the (data, mask) ring-buffer are UTF8-encoded, otherwise
returns 0. */
func parseAsUTF8(symbol *int, input []byte, size uint) uint {
/* ASCII */
if input[0]&0x80 == 0 {
*symbol = int(input[0])
if *symbol > 0 {
return 1
}
}
/* 2-byte UTF8 */
if size > 1 && input[0]&0xE0 == 0xC0 && input[1]&0xC0 == 0x80 {
*symbol = (int(input[0])&0x1F)<<6 | int(input[1])&0x3F
if *symbol > 0x7F {
return 2
}
}
/* 3-byte UTF8 */
if size > 2 && input[0]&0xF0 == 0xE0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 {
*symbol = (int(input[0])&0x0F)<<12 | (int(input[1])&0x3F)<<6 | int(input[2])&0x3F
if *symbol > 0x7FF {
return 3
}
}
/* 4-byte UTF8 */
if size > 3 && input[0]&0xF8 == 0xF0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 && input[3]&0xC0 == 0x80 {
*symbol = (int(input[0])&0x07)<<18 | (int(input[1])&0x3F)<<12 | (int(input[2])&0x3F)<<6 | int(input[3])&0x3F
if *symbol > 0xFFFF && *symbol <= 0x10FFFF {
return 4
}
}
/* Not UTF8, emit a special symbol above the UTF8-code space */
*symbol = 0x110000 | int(input[0])
return 1
}
/* Returns true if at least min_fraction of the data is UTF8-encoded. */
func isMostlyUTF8(data []byte, pos uint, mask uint, length uint, min_fraction float64) bool {
var size_utf8 uint = 0
var i uint = 0
for i < length {
var symbol int
current_data := data[(pos+i)&mask:]
var bytes_read uint = parseAsUTF8(&symbol, current_data, length-i)
i += bytes_read
if symbol < 0x110000 {
size_utf8 += bytes_read
}
}
return float64(size_utf8) > min_fraction*float64(length)
}
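Because parseAsUTF8 maps invalid bytes above the UTF-8 code space, isMostlyUTF8 only has to count the bytes consumed by valid sequences. A small in-package sketch (the 16-byte literal is illustrative; its length is a power of two so that len(data)-1 works as the ring-buffer mask):

	data := []byte("0123456789abcdef") // 16 bytes
	ok := isMostlyUTF8(data, 0, uint(len(data)-1), uint(len(data)), kMinUTF8Ratio)
	// ok == true: all 16 bytes parse as (ASCII) UTF-8, and 16/16 > 0.75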

7
vendor/github.com/andybalholm/brotli/util.go generated vendored Normal file

@ -0,0 +1,7 @@
package brotli
func assert(cond bool) {
if !cond {
panic("assertion failure")
}
}

52
vendor/github.com/andybalholm/brotli/write_bits.go generated vendored Normal file

@ -0,0 +1,52 @@
package brotli
import "encoding/binary"
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Write bits into a byte array. */
/* This function writes bits into bytes in increasing addresses, and within
a byte least-significant-bit first.
The function can write up to 56 bits in one go with WriteBits
Example: let's assume that 3 bits (Rs below) have been written already:
BYTE-0 BYTE+1 BYTE+2
0000 0RRR 0000 0000 0000 0000
Now, we could write 5 or fewer bits in MSB by just shifting by 3
and OR'ing to BYTE-0.
For n bits, we take the last 5 bits, OR that with high bits in BYTE-0,
and locate the rest in BYTE+1, BYTE+2, etc. */
func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) {
/* This branch of the code can write up to 56 bits at a time,
7 bits are lost by being perhaps already in *p and at least
1 bit is needed to initialize the bit-stream ahead (i.e. if 7
bits are in *p and we write 57 bits, then the next write will
access a byte that was never initialized). */
p := array[*pos>>3:]
v := uint64(p[0])
v |= bits << (*pos & 7)
binary.LittleEndian.PutUint64(p, v)
*pos += n_bits
}
func writeSingleBit(bit bool, pos *uint, array []byte) {
if bit {
writeBits(1, 1, pos, array)
} else {
writeBits(1, 0, pos, array)
}
}
func writeBitsPrepareStorage(pos uint, array []byte) {
assert(pos&7 == 0)
array[pos>>3] = 0
}
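A concrete illustration of the LSB-first layout described above (an in-package sketch; the 16-byte buffer simply leaves the 8 spare bytes that binary.LittleEndian.PutUint64 writes into):

	var pos uint
	buf := make([]byte, 16)
	writeBitsPrepareStorage(pos, buf)
	writeBits(3, 0b101, &pos, buf)   // occupies bits 0..2 of buf[0]
	writeBits(5, 0b11010, &pos, buf) // occupies bits 3..7 of buf[0]
	// buf[0] == 0b11010101 (0xD5), pos == 8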

119
vendor/github.com/andybalholm/brotli/writer.go generated vendored Normal file

@ -0,0 +1,119 @@
package brotli
import (
"errors"
"io"
)
const (
BestSpeed = 0
BestCompression = 11
DefaultCompression = 6
)
// WriterOptions configures Writer.
type WriterOptions struct {
// Quality controls the compression-speed vs compression-density trade-offs.
// The higher the quality, the slower the compression. Range is 0 to 11.
Quality int
// LGWin is the base 2 logarithm of the sliding window size.
// Range is 10 to 24. 0 indicates automatic configuration based on Quality.
LGWin int
}
var (
errEncode = errors.New("brotli: encode error")
errWriterClosed = errors.New("brotli: Writer is closed")
)
// Writes to the returned writer are compressed and written to dst.
// It is the caller's responsibility to call Close on the Writer when done.
// Writes may be buffered and not flushed until Close.
func NewWriter(dst io.Writer) *Writer {
return NewWriterLevel(dst, DefaultCompression)
}
// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
// The compression level can be DefaultCompression or any integer value between
// BestSpeed and BestCompression inclusive.
func NewWriterLevel(dst io.Writer, level int) *Writer {
return NewWriterOptions(dst, WriterOptions{
Quality: level,
})
}
// NewWriterOptions is like NewWriter but specifies WriterOptions
func NewWriterOptions(dst io.Writer, options WriterOptions) *Writer {
w := new(Writer)
w.options = options
w.Reset(dst)
return w
}
// Reset discards the Writer's state and makes it equivalent to the result of
// its original state from NewWriter or NewWriterLevel, but writing to dst
// instead. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(dst io.Writer) {
encoderInitState(w)
w.params.quality = w.options.Quality
if w.options.LGWin > 0 {
w.params.lgwin = uint(w.options.LGWin)
}
w.dst = dst
w.err = nil
}
func (w *Writer) writeChunk(p []byte, op int) (n int, err error) {
if w.dst == nil {
return 0, errWriterClosed
}
if w.err != nil {
return 0, w.err
}
for {
availableIn := uint(len(p))
nextIn := p
success := encoderCompressStream(w, op, &availableIn, &nextIn)
bytesConsumed := len(p) - int(availableIn)
p = p[bytesConsumed:]
n += bytesConsumed
if !success {
return n, errEncode
}
if len(p) == 0 || w.err != nil {
return n, w.err
}
}
}
// Flush outputs encoded data for all input provided to Write. The resulting
// output can be decoded to match all input before Flush, but the stream is
// not yet complete until after Close.
// Flush has a negative impact on compression.
func (w *Writer) Flush() error {
_, err := w.writeChunk(nil, operationFlush)
return err
}
// Close flushes remaining data to the decorated writer.
func (w *Writer) Close() error {
// If stream is already closed, it is reported by `writeChunk`.
_, err := w.writeChunk(nil, operationFinish)
w.dst = nil
return err
}
// Write implements io.Writer. Flush or Close must be called to ensure that the
// encoded bytes are actually flushed to the underlying Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
return w.writeChunk(p, operationProcess)
}
type nopCloser struct {
io.Writer
}
func (nopCloser) Close() error { return nil }
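Taken together, the exported surface mirrors compress/gzip: wrap a destination writer, write, then Close (or Flush for a mid-stream checkpoint). A short, self-contained usage sketch against this package (the bytes.Buffer destination and the sample payload are illustrative):

	package main

	import (
		"bytes"
		"log"

		"github.com/andybalholm/brotli"
	)

	func main() {
		var out bytes.Buffer
		w := brotli.NewWriterOptions(&out, brotli.WriterOptions{Quality: brotli.DefaultCompression})
		if _, err := w.Write([]byte("hello, brotli")); err != nil {
			log.Fatal(err)
		}
		// Close flushes any buffered data and finishes the stream.
		if err := w.Close(); err != nil {
			log.Fatal(err)
		}
		// out now holds the complete brotli-compressed stream.
	}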


@ -1,16 +0,0 @@
// Package common contains the common dictionary used by the enc and dec packages
package common // import "github.com/itchio/go-brotli/common"
/*
#cgo CFLAGS: -I${SRCDIR}/../include
#include "dictionary.h"
*/
import "C"
import "unsafe"
// GetDictionary retrieves a pointer to the dictionary data structure
func GetDictionary() unsafe.Pointer {
return unsafe.Pointer(C.BrotliGetDictionary())
}


@ -1,57 +0,0 @@
/* Copyright 2016 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
#ifndef BROTLI_COMMON_CONSTANTS_H_
#define BROTLI_COMMON_CONSTANTS_H_
/* Specification: 7.3. Encoding of the context map */
#define BROTLI_CONTEXT_MAP_MAX_RLE 16
/* Specification: 2. Compressed representation overview */
#define BROTLI_MAX_NUMBER_OF_BLOCK_TYPES 256
/* Specification: 3.3. Alphabet sizes: insert-and-copy length */
#define BROTLI_NUM_LITERAL_SYMBOLS 256
#define BROTLI_NUM_COMMAND_SYMBOLS 704
#define BROTLI_NUM_BLOCK_LEN_SYMBOLS 26
#define BROTLI_MAX_CONTEXT_MAP_SYMBOLS (BROTLI_MAX_NUMBER_OF_BLOCK_TYPES + \
BROTLI_CONTEXT_MAP_MAX_RLE)
#define BROTLI_MAX_BLOCK_TYPE_SYMBOLS (BROTLI_MAX_NUMBER_OF_BLOCK_TYPES + 2)
/* Specification: 3.5. Complex prefix codes */
#define BROTLI_REPEAT_PREVIOUS_CODE_LENGTH 16
#define BROTLI_REPEAT_ZERO_CODE_LENGTH 17
#define BROTLI_CODE_LENGTH_CODES (BROTLI_REPEAT_ZERO_CODE_LENGTH + 1)
/* "code length of 8 is repeated" */
#define BROTLI_INITIAL_REPEATED_CODE_LENGTH 8
/* Specification: 4. Encoding of distances */
#define BROTLI_NUM_DISTANCE_SHORT_CODES 16
#define BROTLI_MAX_NPOSTFIX 3
#define BROTLI_MAX_NDIRECT 120
#define BROTLI_MAX_DISTANCE_BITS 24U
/* BROTLI_NUM_DISTANCE_SYMBOLS == 520 */
#define BROTLI_NUM_DISTANCE_SYMBOLS (BROTLI_NUM_DISTANCE_SHORT_CODES + \
BROTLI_MAX_NDIRECT + \
(BROTLI_MAX_DISTANCE_BITS << \
(BROTLI_MAX_NPOSTFIX + 1)))
/* Distance that is guaranteed to be representable in any stream. */
#define BROTLI_MAX_DISTANCE 0x3FFFFFC
/* 7.1. Context modes and context ID lookup for literals */
/* "context IDs for literals are in the range of 0..63" */
#define BROTLI_LITERAL_CONTEXT_BITS 6
/* 7.2. Context ID for distances */
#define BROTLI_DISTANCE_CONTEXT_BITS 2
/* 9.1. Format of the Stream Header */
/* Number of slack bytes for window size. Don't confuse
with BROTLI_NUM_DISTANCE_SHORT_CODES. */
#define BROTLI_WINDOW_GAP 16
#define BROTLI_MAX_BACKWARD_LIMIT(W) (((size_t)1 << (W)) - BROTLI_WINDOW_GAP)
#endif /* BROTLI_COMMON_CONSTANTS_H_ */

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large


@ -1,64 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Collection of static dictionary words. */
#ifndef BROTLI_COMMON_DICTIONARY_H_
#define BROTLI_COMMON_DICTIONARY_H_
#include <brotli/port.h>
#include <brotli/types.h>
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
typedef struct BrotliDictionary {
/**
* Number of bits to encode index of dictionary word in a bucket.
*
* Specification: Appendix A. Static Dictionary Data
*
* Words in a dictionary are bucketed by length.
* @c 0 means that there are no words of a given length.
* Dictionary consists of words with length of [4..24] bytes.
* Values at [0..3] and [25..31] indices should not be addressed.
*/
uint8_t size_bits_by_length[32];
/* assert(offset[i + 1] == offset[i] + (bits[i] ? (i << bits[i]) : 0)) */
uint32_t offsets_by_length[32];
/* assert(data_size == offsets_by_length[31]) */
size_t data_size;
/* Data array is not bound, and should obey to size_bits_by_length values.
Specified size matches default (RFC 7932) dictionary. Its size is
defined by data_size */
const uint8_t* data;
} BrotliDictionary;
BROTLI_COMMON_API const BrotliDictionary* BrotliGetDictionary(void);
/**
* Sets dictionary data.
*
* When dictionary data is already set / present, this method is no-op.
*
* Dictionary data MUST be provided before BrotliGetDictionary is invoked.
* This method is used ONLY in multi-client environment (e.g. C + Java),
* to reduce storage by sharing single dictionary between implementations.
*/
BROTLI_COMMON_API void BrotliSetDictionaryData(const uint8_t* data);
#define BROTLI_MIN_DICTIONARY_WORD_LENGTH 4
#define BROTLI_MAX_DICTIONARY_WORD_LENGTH 24
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_COMMON_DICTIONARY_H_ */


@ -1,418 +0,0 @@
/* Copyright 2016 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Macros for compiler / platform specific features and build options. */
#ifndef BROTLI_COMMON_PLATFORM_H_
#define BROTLI_COMMON_PLATFORM_H_
#include <string.h> /* memcpy */
#include <brotli/port.h>
#include <brotli/types.h>
#if defined OS_LINUX || defined OS_CYGWIN
#include <endian.h>
#elif defined OS_FREEBSD
#include <machine/endian.h>
#elif defined OS_MACOSX
#include <machine/endian.h>
/* Let's try and follow the Linux convention */
#define BROTLI_X_BYTE_ORDER BYTE_ORDER
#define BROTLI_X_LITTLE_ENDIAN LITTLE_ENDIAN
#define BROTLI_X_BIG_ENDIAN BIG_ENDIAN
#endif
#if defined(BROTLI_ENABLE_LOG) || defined(BROTLI_DEBUG)
#include <assert.h>
#include <stdio.h>
#endif
/* Macros for compiler / platform specific features and build options.
Build options are:
* BROTLI_BUILD_32_BIT disables 64-bit optimizations
* BROTLI_BUILD_64_BIT forces to use 64-bit optimizations
* BROTLI_BUILD_BIG_ENDIAN forces to use big-endian optimizations
* BROTLI_BUILD_ENDIAN_NEUTRAL disables endian-aware optimizations
* BROTLI_BUILD_LITTLE_ENDIAN forces to use little-endian optimizations
* BROTLI_BUILD_PORTABLE disables dangerous optimizations, like unaligned
read and overlapping memcpy; this reduces decompression speed by 5%
* BROTLI_BUILD_NO_RBIT disables "rbit" optimization for ARM CPUs
* BROTLI_DEBUG dumps file name and line number when decoder detects stream
or memory error
* BROTLI_ENABLE_LOG enables asserts and dumps various state information
*/
#if BROTLI_MODERN_COMPILER || __has_attribute(always_inline)
#define BROTLI_ATTRIBUTE_ALWAYS_INLINE __attribute__ ((always_inline))
#else
#define BROTLI_ATTRIBUTE_ALWAYS_INLINE
#endif
#if defined(_WIN32) || defined(__CYGWIN__)
#define BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN
#elif BROTLI_MODERN_COMPILER || __has_attribute(visibility)
#define BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN \
__attribute__ ((visibility ("hidden")))
#else
#define BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN
#endif
#ifndef BROTLI_INTERNAL
#define BROTLI_INTERNAL BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN
#endif
#ifndef _MSC_VER
#if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \
(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
#define BROTLI_INLINE inline BROTLI_ATTRIBUTE_ALWAYS_INLINE
#else
#define BROTLI_INLINE
#endif
#else /* _MSC_VER */
#define BROTLI_INLINE __forceinline
#endif /* _MSC_VER */
#if BROTLI_MODERN_COMPILER || __has_attribute(unused)
#define BROTLI_UNUSED_FUNCTION static BROTLI_INLINE __attribute__ ((unused))
#else
#define BROTLI_UNUSED_FUNCTION static BROTLI_INLINE
#endif
#if !defined(__cplusplus) && !defined(c_plusplus) && \
(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
#define BROTLI_RESTRICT restrict
#elif BROTLI_GCC_VERSION > 295 || defined(__llvm__)
#define BROTLI_RESTRICT __restrict
#else
#define BROTLI_RESTRICT
#endif
#if BROTLI_MODERN_COMPILER || __has_attribute(noinline)
#define BROTLI_NOINLINE __attribute__((noinline))
#else
#define BROTLI_NOINLINE
#endif
#if defined(__arm__) || defined(__thumb__) || \
defined(_M_ARM) || defined(_M_ARMT) || defined(__ARM64_ARCH_8__)
#define BROTLI_TARGET_ARM
#if (defined(__ARM_ARCH) && (__ARM_ARCH == 7)) || \
(defined(M_ARM) && (M_ARM == 7))
#define BROTLI_TARGET_ARMV7
#endif /* ARMv7 */
#if defined(__aarch64__) || defined(__ARM64_ARCH_8__)
#define BROTLI_TARGET_ARMV8
#endif /* ARMv8 */
#endif /* ARM */
#if defined(__i386) || defined(_M_IX86)
#define BROTLI_TARGET_X86
#endif
#if defined(__x86_64__) || defined(_M_X64)
#define BROTLI_TARGET_X64
#endif
#if defined(__PPC64__)
#define BROTLI_TARGET_POWERPC64
#endif
#if defined(BROTLI_BUILD_64_BIT)
#define BROTLI_64_BITS 1
#elif defined(BROTLI_BUILD_32_BIT)
#define BROTLI_64_BITS 0
#elif defined(BROTLI_TARGET_X64) || defined(BROTLI_TARGET_ARMV8) || \
defined(BROTLI_TARGET_POWERPC64)
#define BROTLI_64_BITS 1
#else
#define BROTLI_64_BITS 0
#endif
#if (BROTLI_64_BITS)
#define brotli_reg_t uint64_t
#else
#define brotli_reg_t uint32_t
#endif
#if defined(BROTLI_BUILD_BIG_ENDIAN)
#define BROTLI_BIG_ENDIAN 1
#elif defined(BROTLI_BUILD_LITTLE_ENDIAN)
#define BROTLI_LITTLE_ENDIAN 1
#elif defined(BROTLI_BUILD_ENDIAN_NEUTRAL)
/* Just break elif chain. */
#elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define BROTLI_LITTLE_ENDIAN 1
#elif defined(_WIN32) || defined(BROTLI_TARGET_X64)
/* Win32 & x64 can currently always be assumed to be little endian */
#define BROTLI_LITTLE_ENDIAN 1
#elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define BROTLI_BIG_ENDIAN 1
#elif defined(BROTLI_X_BYTE_ORDER)
#if BROTLI_X_BYTE_ORDER == BROTLI_X_LITTLE_ENDIAN
#define BROTLI_LITTLE_ENDIAN 1
#elif BROTLI_X_BYTE_ORDER == BROTLI_X_BIG_ENDIAN
#define BROTLI_BIG_ENDIAN 1
#endif
#endif /* BROTLI_X_BYTE_ORDER */
#if !defined(BROTLI_LITTLE_ENDIAN)
#define BROTLI_LITTLE_ENDIAN 0
#endif
#if !defined(BROTLI_BIG_ENDIAN)
#define BROTLI_BIG_ENDIAN 0
#endif
#ifdef BROTLI_X_BYTE_ORDER
#undef BROTLI_X_BYTE_ORDER
#undef BROTLI_X_LITTLE_ENDIAN
#undef BROTLI_X_BIG_ENDIAN
#endif
#ifdef BROTLI_BUILD_PORTABLE
#define BROTLI_ALIGNED_READ (!!1)
#elif defined(BROTLI_TARGET_X86) || defined(BROTLI_TARGET_X64) || \
defined(BROTLI_TARGET_ARMV7) || defined(BROTLI_TARGET_ARMV8)
/* Allow unaligned read only for white-listed CPUs. */
#define BROTLI_ALIGNED_READ (!!0)
#else
#define BROTLI_ALIGNED_READ (!!1)
#endif
#if BROTLI_ALIGNED_READ
/* Portable unaligned memory access: read / write values via memcpy. */
static BROTLI_INLINE uint16_t BrotliUnalignedRead16(const void* p) {
uint16_t t;
memcpy(&t, p, sizeof t);
return t;
}
static BROTLI_INLINE uint32_t BrotliUnalignedRead32(const void* p) {
uint32_t t;
memcpy(&t, p, sizeof t);
return t;
}
static BROTLI_INLINE uint64_t BrotliUnalignedRead64(const void* p) {
uint64_t t;
memcpy(&t, p, sizeof t);
return t;
}
static BROTLI_INLINE void BrotliUnalignedWrite64(void* p, uint64_t v) {
memcpy(p, &v, sizeof v);
}
#else /* BROTLI_ALIGNED_READ */
/* Unaligned memory access is allowed: just cast pointer to requested type. */
static BROTLI_INLINE uint16_t BrotliUnalignedRead16(const void* p) {
return *(const uint16_t*)p;
}
static BROTLI_INLINE uint32_t BrotliUnalignedRead32(const void* p) {
return *(const uint32_t*)p;
}
static BROTLI_INLINE uint64_t BrotliUnalignedRead64(const void* p) {
return *(const uint64_t*)p;
}
static BROTLI_INLINE void BrotliUnalignedWrite64(void* p, uint64_t v) {
*(uint64_t*)p = v;
}
#endif /* BROTLI_ALIGNED_READ */
#if BROTLI_LITTLE_ENDIAN
/* Straight endianness. Just read / write values. */
#define BROTLI_UNALIGNED_LOAD16LE BrotliUnalignedRead16
#define BROTLI_UNALIGNED_LOAD32LE BrotliUnalignedRead32
#define BROTLI_UNALIGNED_LOAD64LE BrotliUnalignedRead64
#define BROTLI_UNALIGNED_STORE64LE BrotliUnalignedWrite64
#elif BROTLI_BIG_ENDIAN /* BROTLI_LITTLE_ENDIAN */
/* Tell the compiler to byte-swap values. */
#define BROTLI_BSWAP16_(V) ((uint16_t)( \
(((V) & 0xFFU) << 8) | \
(((V) >> 8) & 0xFFU)))
static BROTLI_INLINE uint16_t BROTLI_UNALIGNED_LOAD16LE(const void* p) {
uint16_t value = BrotliUnalignedRead16(p);
return BROTLI_BSWAP16_(value);
}
#define BROTLI_BSWAP32_(V) ( \
(((V) & 0xFFU) << 24) | (((V) & 0xFF00U) << 8) | \
(((V) >> 8) & 0xFF00U) | (((V) >> 24) & 0xFFU))
static BROTLI_INLINE uint32_t BROTLI_UNALIGNED_LOAD32LE(const void* p) {
uint32_t value = BrotliUnalignedRead32(p);
return BROTLI_BSWAP32_(value);
}
#define BROTLI_BSWAP64_(V) ( \
(((V) & 0xFFU) << 56) | (((V) & 0xFF00U) << 40) | \
(((V) & 0xFF0000U) << 24) | (((V) & 0xFF000000U) << 8) | \
(((V) >> 8) & 0xFF000000U) | (((V) >> 24) & 0xFF0000U) | \
(((V) >> 40) & 0xFF00U) | (((V) >> 56) & 0xFFU))
static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64LE(const void* p) {
uint64_t value = BrotliUnalignedRead64(p);
return BROTLI_BSWAP64_(value);
}
static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64LE(void* p, uint64_t v) {
uint64_t value = BROTLI_BSWAP64_(v);
BrotliUnalignedWrite64(p, value);
}
#else /* BROTLI_LITTLE_ENDIAN */
/* Read / store values byte-wise; hopefully compiler will understand. */
static BROTLI_INLINE uint16_t BROTLI_UNALIGNED_LOAD16LE(const void* p) {
const uint8_t* in = (const uint8_t*)p;
return (uint16_t)(in[0] | (in[1] << 8));
}
static BROTLI_INLINE uint32_t BROTLI_UNALIGNED_LOAD32LE(const void* p) {
const uint8_t* in = (const uint8_t*)p;
uint32_t value = (uint32_t)(in[0]);
value |= (uint32_t)(in[1]) << 8;
value |= (uint32_t)(in[2]) << 16;
value |= (uint32_t)(in[3]) << 24;
return value;
}
static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64LE(const void* p) {
const uint8_t* in = (const uint8_t*)p;
uint64_t value = (uint64_t)(in[0]);
value |= (uint64_t)(in[1]) << 8;
value |= (uint64_t)(in[2]) << 16;
value |= (uint64_t)(in[3]) << 24;
value |= (uint64_t)(in[4]) << 32;
value |= (uint64_t)(in[5]) << 40;
value |= (uint64_t)(in[6]) << 48;
value |= (uint64_t)(in[7]) << 56;
return value;
}
static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64LE(void* p, uint64_t v) {
uint8_t* out = (uint8_t*)p;
out[0] = (uint8_t)v;
out[1] = (uint8_t)(v >> 8);
out[2] = (uint8_t)(v >> 16);
out[3] = (uint8_t)(v >> 24);
out[4] = (uint8_t)(v >> 32);
out[5] = (uint8_t)(v >> 40);
out[6] = (uint8_t)(v >> 48);
out[7] = (uint8_t)(v >> 56);
}
#endif /* BROTLI_LITTLE_ENDIAN */
/* Define "BROTLI_PREDICT_TRUE" and "BROTLI_PREDICT_FALSE" macros for capable
compilers.
To apply compiler hint, enclose the branching condition into macros, like this:
if (BROTLI_PREDICT_TRUE(zero == 0)) {
// main execution path
} else {
// compiler should place this code outside of main execution path
}
OR:
if (BROTLI_PREDICT_FALSE(something_rare_or_unexpected_happens)) {
// compiler should place this code outside of main execution path
}
*/
#if BROTLI_MODERN_COMPILER || __has_builtin(__builtin_expect)
#define BROTLI_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#define BROTLI_PREDICT_FALSE(x) (__builtin_expect(x, 0))
#else
#define BROTLI_PREDICT_FALSE(x) (x)
#define BROTLI_PREDICT_TRUE(x) (x)
#endif
/* The BROTLI_IS_CONSTANT macro returns true for compile-time constants. */
#if BROTLI_MODERN_COMPILER || __has_builtin(__builtin_constant_p)
#define BROTLI_IS_CONSTANT(x) (!!__builtin_constant_p(x))
#else
#define BROTLI_IS_CONSTANT(x) (!!0)
#endif
#if defined(BROTLI_TARGET_ARM)
#define BROTLI_HAS_UBFX (!!1)
#else
#define BROTLI_HAS_UBFX (!!0)
#endif
#ifdef BROTLI_ENABLE_LOG
#define BROTLI_DCHECK(x) assert(x)
#define BROTLI_LOG(x) printf x
#else
#define BROTLI_DCHECK(x)
#define BROTLI_LOG(x)
#endif
#if defined(BROTLI_DEBUG) || defined(BROTLI_ENABLE_LOG)
static BROTLI_INLINE void BrotliDump(const char* f, int l, const char* fn) {
fprintf(stderr, "%s:%d (%s)\n", f, l, fn);
fflush(stderr);
}
#define BROTLI_DUMP() BrotliDump(__FILE__, __LINE__, __FUNCTION__)
#else
#define BROTLI_DUMP() (void)(0)
#endif
#if (BROTLI_MODERN_COMPILER || defined(__llvm__)) && \
!defined(BROTLI_BUILD_NO_RBIT)
#if defined(BROTLI_TARGET_ARMV7) || defined(BROTLI_TARGET_ARMV8)
/* TODO: detect ARMv6T2 and enable this code for it. */
static BROTLI_INLINE brotli_reg_t BrotliRBit(brotli_reg_t input) {
brotli_reg_t output;
__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
return output;
}
#define BROTLI_RBIT(x) BrotliRBit(x)
#endif /* armv7 */
#endif /* gcc || clang */
#if !defined(BROTLI_RBIT)
static BROTLI_INLINE void BrotliRBit(void) { /* Should break build if used. */ }
#endif /* BROTLI_RBIT */
#define BROTLI_REPEAT(N, X) { \
if ((N & 1) != 0) {X;} \
if ((N & 2) != 0) {X; X;} \
if ((N & 4) != 0) {X; X; X; X;} \
}
#define BROTLI_UNUSED(X) (void)(X)
#define BROTLI_MIN_MAX(T) \
static BROTLI_INLINE T brotli_min_ ## T (T a, T b) { return a < b ? a : b; } \
static BROTLI_INLINE T brotli_max_ ## T (T a, T b) { return a > b ? a : b; }
BROTLI_MIN_MAX(double) BROTLI_MIN_MAX(float) BROTLI_MIN_MAX(int)
BROTLI_MIN_MAX(size_t) BROTLI_MIN_MAX(uint32_t) BROTLI_MIN_MAX(uint8_t)
#undef BROTLI_MIN_MAX
#define BROTLI_MIN(T, A, B) (brotli_min_ ## T((A), (B)))
#define BROTLI_MAX(T, A, B) (brotli_max_ ## T((A), (B)))
#define BROTLI_SWAP(T, A, I, J) { \
T __brotli_swap_tmp = (A)[(I)]; \
(A)[(I)] = (A)[(J)]; \
(A)[(J)] = __brotli_swap_tmp; \
}
BROTLI_UNUSED_FUNCTION void BrotliSuppressUnusedFunctions(void) {
BROTLI_UNUSED(BrotliSuppressUnusedFunctions);
BROTLI_UNUSED(BrotliUnalignedRead16);
BROTLI_UNUSED(BrotliUnalignedRead32);
BROTLI_UNUSED(BrotliUnalignedRead64);
BROTLI_UNUSED(BrotliUnalignedWrite64);
BROTLI_UNUSED(BROTLI_UNALIGNED_LOAD16LE);
BROTLI_UNUSED(BROTLI_UNALIGNED_LOAD32LE);
BROTLI_UNUSED(BROTLI_UNALIGNED_LOAD64LE);
BROTLI_UNUSED(BROTLI_UNALIGNED_STORE64LE);
BROTLI_UNUSED(BrotliRBit);
BROTLI_UNUSED(brotli_min_double);
BROTLI_UNUSED(brotli_max_double);
BROTLI_UNUSED(brotli_min_float);
BROTLI_UNUSED(brotli_max_float);
BROTLI_UNUSED(brotli_min_int);
BROTLI_UNUSED(brotli_max_int);
BROTLI_UNUSED(brotli_min_size_t);
BROTLI_UNUSED(brotli_max_size_t);
BROTLI_UNUSED(brotli_min_uint32_t);
BROTLI_UNUSED(brotli_max_uint32_t);
BROTLI_UNUSED(brotli_min_uint8_t);
BROTLI_UNUSED(brotli_max_uint8_t);
}
#endif /* BROTLI_COMMON_PLATFORM_H_ */


@ -1,26 +0,0 @@
/* Copyright 2016 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Version definition. */
#ifndef BROTLI_COMMON_VERSION_H_
#define BROTLI_COMMON_VERSION_H_
/* This macro should only be used when the library is compiled together with the client.
If the library is dynamically linked, use the BrotliDecoderVersion and
BrotliEncoderVersion methods. */
/* Semantic version, calculated as (MAJOR << 24) | (MINOR << 12) | PATCH */
#define BROTLI_VERSION 0x1000002
/* This macro is used by build system to produce Libtool-friendly soname. See
https://www.gnu.org/software/libtool/manual/html_node/Libtool-versioning.html
*/
/* ABI version, calculated as (CURRENT << 24) | (REVISION << 12) | AGE */
#define BROTLI_ABI_VERSION 0x1002000
#endif /* BROTLI_COMMON_VERSION_H_ */


@ -1,14 +0,0 @@
#brotli/enc
include ../shared.mk
OBJS_NODICT = backward_references.o block_splitter.o brotli_bit_stream.o compress_fragment.o compress_fragment_two_pass.o encode.o encode_parallel.o entropy_encode.o histogram.o literal_cost.o metablock.o static_dict.o streams.o utf8_util.o
OBJS = $(OBJS_NODICT) dictionary.o
nodict : $(OBJS_NODICT)
all : $(OBJS)
clean :
rm -f $(OBJS) $(SO)


@ -1,134 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function to find backward reference copies. */
#include "./backward_references.h"
#include "../common/constants.h"
#include "../common/dictionary.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./command.h"
#include "./dictionary_hash.h"
#include "./memory.h"
#include "./quality.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static BROTLI_INLINE size_t ComputeDistanceCode(size_t distance,
size_t max_distance,
const int* dist_cache) {
if (distance <= max_distance) {
size_t distance_plus_3 = distance + 3;
size_t offset0 = distance_plus_3 - (size_t)dist_cache[0];
size_t offset1 = distance_plus_3 - (size_t)dist_cache[1];
if (distance == (size_t)dist_cache[0]) {
return 0;
} else if (distance == (size_t)dist_cache[1]) {
return 1;
} else if (offset0 < 7) {
return (0x9750468 >> (4 * offset0)) & 0xF;
} else if (offset1 < 7) {
return (0xFDB1ACE >> (4 * offset1)) & 0xF;
} else if (distance == (size_t)dist_cache[2]) {
return 2;
} else if (distance == (size_t)dist_cache[3]) {
return 3;
}
}
return distance + BROTLI_NUM_DISTANCE_SHORT_CODES - 1;
}
#define EXPAND_CAT(a, b) CAT(a, b)
#define CAT(a, b) a ## b
#define FN(X) EXPAND_CAT(X, HASHER())
#define EXPORT_FN(X) EXPAND_CAT(X, EXPAND_CAT(PREFIX(), HASHER()))
#define PREFIX() N
#define HASHER() H2
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H3
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H4
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H5
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H6
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H40
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H41
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H42
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#define HASHER() H54
/* NOLINTNEXTLINE(build/include) */
#include "./backward_references_inc.h"
#undef HASHER
#undef PREFIX
#undef EXPORT_FN
#undef FN
#undef CAT
#undef EXPAND_CAT
void BrotliCreateBackwardReferences(const BrotliDictionary* dictionary,
size_t num_bytes,
size_t position,
const uint8_t* ringbuffer,
size_t ringbuffer_mask,
const BrotliEncoderParams* params,
HasherHandle hasher,
int* dist_cache,
size_t* last_insert_len,
Command* commands,
size_t* num_commands,
size_t* num_literals) {
switch (params->hasher.type) {
#define CASE_(N) \
case N: \
CreateBackwardReferencesNH ## N(dictionary, \
kStaticDictionaryHash, num_bytes, position, ringbuffer, \
ringbuffer_mask, params, hasher, dist_cache, \
last_insert_len, commands, num_commands, num_literals); \
return;
FOR_GENERIC_HASHERS(CASE_)
#undef CASE_
default:
break;
}
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif


@ -1,39 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function to find backward reference copies. */
#ifndef BROTLI_ENC_BACKWARD_REFERENCES_H_
#define BROTLI_ENC_BACKWARD_REFERENCES_H_
#include "../common/constants.h"
#include "../common/dictionary.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./command.h"
#include "./hash.h"
#include "./quality.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* "commands" points to the next output command to write to, "*num_commands" is
initially the total amount of commands output by previous
CreateBackwardReferences calls, and must be incremented by the amount written
by this call. */
BROTLI_INTERNAL void BrotliCreateBackwardReferences(
const BrotliDictionary* dictionary,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ringbuffer_mask, const BrotliEncoderParams* params,
HasherHandle hasher, int* dist_cache, size_t* last_insert_len,
Command* commands, size_t* num_commands, size_t* num_literals);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_BACKWARD_REFERENCES_H_ */


@ -1,800 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function to find backward reference copies. */
#include "./backward_references_hq.h"
#include <string.h> /* memcpy, memset */
#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./command.h"
#include "./fast_log.h"
#include "./find_match_length.h"
#include "./literal_cost.h"
#include "./memory.h"
#include "./prefix.h"
#include "./quality.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static const float kInfinity = 1.7e38f; /* ~= 2 ^ 127 */
static const uint32_t kDistanceCacheIndex[] = {
0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
};
static const int kDistanceCacheOffset[] = {
0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3
};
void BrotliInitZopfliNodes(ZopfliNode* array, size_t length) {
ZopfliNode stub;
size_t i;
stub.length = 1;
stub.distance = 0;
stub.insert_length = 0;
stub.u.cost = kInfinity;
for (i = 0; i < length; ++i) array[i] = stub;
}
static BROTLI_INLINE uint32_t ZopfliNodeCopyLength(const ZopfliNode* self) {
return self->length & 0xffffff;
}
static BROTLI_INLINE uint32_t ZopfliNodeLengthCode(const ZopfliNode* self) {
const uint32_t modifier = self->length >> 24;
return ZopfliNodeCopyLength(self) + 9u - modifier;
}
static BROTLI_INLINE uint32_t ZopfliNodeCopyDistance(const ZopfliNode* self) {
return self->distance & 0x7ffffff;
}
static BROTLI_INLINE uint32_t ZopfliNodeDistanceCode(const ZopfliNode* self) {
const uint32_t short_code = self->distance >> 27;
return short_code == 0 ?
ZopfliNodeCopyDistance(self) + BROTLI_NUM_DISTANCE_SHORT_CODES - 1 :
short_code - 1;
}
static BROTLI_INLINE uint32_t ZopfliNodeCommandLength(const ZopfliNode* self) {
return ZopfliNodeCopyLength(self) + self->insert_length;
}
/* Histogram based cost model for zopflification. */
typedef struct ZopfliCostModel {
/* The insert and copy length symbols. */
float cost_cmd_[BROTLI_NUM_COMMAND_SYMBOLS];
float cost_dist_[BROTLI_NUM_DISTANCE_SYMBOLS];
/* Cumulative costs of literals per position in the stream. */
float* literal_costs_;
float min_cost_cmd_;
size_t num_bytes_;
} ZopfliCostModel;
static void InitZopfliCostModel(
MemoryManager* m, ZopfliCostModel* self, size_t num_bytes) {
self->num_bytes_ = num_bytes;
self->literal_costs_ = BROTLI_ALLOC(m, float, num_bytes + 2);
if (BROTLI_IS_OOM(m)) return;
}
static void CleanupZopfliCostModel(MemoryManager* m, ZopfliCostModel* self) {
BROTLI_FREE(m, self->literal_costs_);
}
static void SetCost(const uint32_t* histogram, size_t histogram_size,
float* cost) {
size_t sum = 0;
float log2sum;
size_t i;
for (i = 0; i < histogram_size; i++) {
sum += histogram[i];
}
log2sum = (float)FastLog2(sum);
for (i = 0; i < histogram_size; i++) {
if (histogram[i] == 0) {
cost[i] = log2sum + 2;
continue;
}
/* Shannon bits for this symbol. */
cost[i] = log2sum - (float)FastLog2(histogram[i]);
/* Cannot be coded with less than 1 bit */
if (cost[i] < 1) cost[i] = 1;
}
}
static void ZopfliCostModelSetFromCommands(ZopfliCostModel* self,
size_t position,
const uint8_t* ringbuffer,
size_t ringbuffer_mask,
const Command* commands,
size_t num_commands,
size_t last_insert_len) {
uint32_t histogram_literal[BROTLI_NUM_LITERAL_SYMBOLS];
uint32_t histogram_cmd[BROTLI_NUM_COMMAND_SYMBOLS];
uint32_t histogram_dist[BROTLI_NUM_DISTANCE_SYMBOLS];
float cost_literal[BROTLI_NUM_LITERAL_SYMBOLS];
size_t pos = position - last_insert_len;
float min_cost_cmd = kInfinity;
size_t i;
float* cost_cmd = self->cost_cmd_;
memset(histogram_literal, 0, sizeof(histogram_literal));
memset(histogram_cmd, 0, sizeof(histogram_cmd));
memset(histogram_dist, 0, sizeof(histogram_dist));
for (i = 0; i < num_commands; i++) {
size_t inslength = commands[i].insert_len_;
size_t copylength = CommandCopyLen(&commands[i]);
size_t distcode = commands[i].dist_prefix_;
size_t cmdcode = commands[i].cmd_prefix_;
size_t j;
histogram_cmd[cmdcode]++;
if (cmdcode >= 128) histogram_dist[distcode]++;
for (j = 0; j < inslength; j++) {
histogram_literal[ringbuffer[(pos + j) & ringbuffer_mask]]++;
}
pos += inslength + copylength;
}
SetCost(histogram_literal, BROTLI_NUM_LITERAL_SYMBOLS, cost_literal);
SetCost(histogram_cmd, BROTLI_NUM_COMMAND_SYMBOLS, cost_cmd);
SetCost(histogram_dist, BROTLI_NUM_DISTANCE_SYMBOLS, self->cost_dist_);
for (i = 0; i < BROTLI_NUM_COMMAND_SYMBOLS; ++i) {
min_cost_cmd = BROTLI_MIN(float, min_cost_cmd, cost_cmd[i]);
}
self->min_cost_cmd_ = min_cost_cmd;
{
float* literal_costs = self->literal_costs_;
size_t num_bytes = self->num_bytes_;
literal_costs[0] = 0.0;
for (i = 0; i < num_bytes; ++i) {
literal_costs[i + 1] = literal_costs[i] +
cost_literal[ringbuffer[(position + i) & ringbuffer_mask]];
}
}
}
static void ZopfliCostModelSetFromLiteralCosts(ZopfliCostModel* self,
size_t position,
const uint8_t* ringbuffer,
size_t ringbuffer_mask) {
float* literal_costs = self->literal_costs_;
float* cost_dist = self->cost_dist_;
float* cost_cmd = self->cost_cmd_;
size_t num_bytes = self->num_bytes_;
size_t i;
BrotliEstimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask,
ringbuffer, &literal_costs[1]);
literal_costs[0] = 0.0;
for (i = 0; i < num_bytes; ++i) {
literal_costs[i + 1] += literal_costs[i];
}
for (i = 0; i < BROTLI_NUM_COMMAND_SYMBOLS; ++i) {
cost_cmd[i] = (float)FastLog2(11 + (uint32_t)i);
}
for (i = 0; i < BROTLI_NUM_DISTANCE_SYMBOLS; ++i) {
cost_dist[i] = (float)FastLog2(20 + (uint32_t)i);
}
self->min_cost_cmd_ = (float)FastLog2(11);
}
static BROTLI_INLINE float ZopfliCostModelGetCommandCost(
const ZopfliCostModel* self, uint16_t cmdcode) {
return self->cost_cmd_[cmdcode];
}
static BROTLI_INLINE float ZopfliCostModelGetDistanceCost(
const ZopfliCostModel* self, size_t distcode) {
return self->cost_dist_[distcode];
}
static BROTLI_INLINE float ZopfliCostModelGetLiteralCosts(
const ZopfliCostModel* self, size_t from, size_t to) {
return self->literal_costs_[to] - self->literal_costs_[from];
}
static BROTLI_INLINE float ZopfliCostModelGetMinCostCmd(
const ZopfliCostModel* self) {
return self->min_cost_cmd_;
}
/* REQUIRES: len >= 2, start_pos <= pos */
/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */
/* Maintains the "ZopfliNode array invariant". */
static BROTLI_INLINE void UpdateZopfliNode(ZopfliNode* nodes, size_t pos,
size_t start_pos, size_t len, size_t len_code, size_t dist,
size_t short_code, float cost) {
ZopfliNode* next = &nodes[pos + len];
next->length = (uint32_t)(len | ((len + 9u - len_code) << 24));
next->distance = (uint32_t)(dist | (short_code << 27));
next->insert_length = (uint32_t)(pos - start_pos);
next->u.cost = cost;
}
typedef struct PosData {
size_t pos;
int distance_cache[4];
float costdiff;
float cost;
} PosData;
/* Maintains the smallest 8 cost difference together with their positions */
typedef struct StartPosQueue {
PosData q_[8];
size_t idx_;
} StartPosQueue;
static BROTLI_INLINE void InitStartPosQueue(StartPosQueue* self) {
self->idx_ = 0;
}
static size_t StartPosQueueSize(const StartPosQueue* self) {
return BROTLI_MIN(size_t, self->idx_, 8);
}
static void StartPosQueuePush(StartPosQueue* self, const PosData* posdata) {
size_t offset = ~(self->idx_++) & 7;
size_t len = StartPosQueueSize(self);
size_t i;
PosData* q = self->q_;
q[offset] = *posdata;
/* Restore the sorted order. In the list of |len| items at most |len - 1|
adjacent element comparisons / swaps are required. */
for (i = 1; i < len; ++i) {
if (q[offset & 7].costdiff > q[(offset + 1) & 7].costdiff) {
BROTLI_SWAP(PosData, q, offset & 7, (offset + 1) & 7);
}
++offset;
}
}
static const PosData* StartPosQueueAt(const StartPosQueue* self, size_t k) {
return &self->q_[(k - self->idx_) & 7];
}
/* Returns the minimum possible copy length that can improve the cost of any */
/* future position. */
static size_t ComputeMinimumCopyLength(const float start_cost,
const ZopfliNode* nodes,
const size_t num_bytes,
const size_t pos) {
/* Compute the minimum possible cost of reaching any future position. */
float min_cost = start_cost;
size_t len = 2;
size_t next_len_bucket = 4;
size_t next_len_offset = 10;
while (pos + len <= num_bytes && nodes[pos + len].u.cost <= min_cost) {
/* We already reached (pos + len) with no more cost than the minimum
possible cost of reaching anything from this pos, so there is no point in
looking for lengths <= len. */
++len;
if (len == next_len_offset) {
/* We reached the next copy length code bucket, so we add one more
extra bit to the minimum cost. */
min_cost += 1.0f;
next_len_offset += next_len_bucket;
next_len_bucket *= 2;
}
}
return len;
}
/* REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
static uint32_t ComputeDistanceShortcut(const size_t block_start,
const size_t pos,
const size_t max_backward,
const size_t gap,
const ZopfliNode* nodes) {
const size_t clen = ZopfliNodeCopyLength(&nodes[pos]);
const size_t ilen = nodes[pos].insert_length;
const size_t dist = ZopfliNodeCopyDistance(&nodes[pos]);
/* Since |block_start + pos| is the end position of the command, the copy part
starts from |block_start + pos - clen|. Distances that are greater than
this or greater than |max_backward| are static dictionary references, and
do not update the last distances. Also distance code 0 (last distance)
does not update the last distances. */
if (pos == 0) {
return 0;
} else if (dist + clen <= block_start + pos + gap &&
dist <= max_backward + gap &&
ZopfliNodeDistanceCode(&nodes[pos]) > 0) {
return (uint32_t)pos;
} else {
return nodes[pos - clen - ilen].u.shortcut;
}
}
/* Fills in dist_cache[0..3] with the last four distances (as defined by
Section 4. of the Spec) that would be used at (block_start + pos) if we
used the shortest path of commands from block_start, computed from
nodes[0..pos]. The last four distances at block_start are in
starting_dist_cache[0..3].
REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
static void ComputeDistanceCache(const size_t pos,
const int* starting_dist_cache,
const ZopfliNode* nodes,
int* dist_cache) {
int idx = 0;
size_t p = nodes[pos].u.shortcut;
while (idx < 4 && p > 0) {
const size_t ilen = nodes[p].insert_length;
const size_t clen = ZopfliNodeCopyLength(&nodes[p]);
const size_t dist = ZopfliNodeCopyDistance(&nodes[p]);
dist_cache[idx++] = (int)dist;
/* Because of prerequisite, p >= clen + ilen >= 2. */
p = nodes[p - clen - ilen].u.shortcut;
}
for (; idx < 4; ++idx) {
dist_cache[idx] = *starting_dist_cache++;
}
}
/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
is eligible. */
static void EvaluateNode(
const size_t block_start, const size_t pos, const size_t max_backward_limit,
const size_t gap, const int* starting_dist_cache,
const ZopfliCostModel* model, StartPosQueue* queue, ZopfliNode* nodes) {
/* Save cost, because ComputeDistanceCache invalidates it. */
float node_cost = nodes[pos].u.cost;
nodes[pos].u.shortcut = ComputeDistanceShortcut(
block_start, pos, max_backward_limit, gap, nodes);
if (node_cost <= ZopfliCostModelGetLiteralCosts(model, 0, pos)) {
PosData posdata;
posdata.pos = pos;
posdata.cost = node_cost;
posdata.costdiff = node_cost -
ZopfliCostModelGetLiteralCosts(model, 0, pos);
ComputeDistanceCache(
pos, starting_dist_cache, nodes, posdata.distance_cache);
StartPosQueuePush(queue, &posdata);
}
}
/* Returns longest copy length. */
static size_t UpdateNodes(
const size_t num_bytes, const size_t block_start, const size_t pos,
const uint8_t* ringbuffer, const size_t ringbuffer_mask,
const BrotliEncoderParams* params, const size_t max_backward_limit,
const int* starting_dist_cache, const size_t num_matches,
const BackwardMatch* matches, const ZopfliCostModel* model,
StartPosQueue* queue, ZopfliNode* nodes) {
const size_t cur_ix = block_start + pos;
const size_t cur_ix_masked = cur_ix & ringbuffer_mask;
const size_t max_distance = BROTLI_MIN(size_t, cur_ix, max_backward_limit);
const size_t max_len = num_bytes - pos;
const size_t max_zopfli_len = MaxZopfliLen(params);
const size_t max_iters = MaxZopfliCandidates(params);
size_t min_len;
size_t result = 0;
size_t k;
size_t gap = 0;
EvaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache,
model, queue, nodes);
{
const PosData* posdata = StartPosQueueAt(queue, 0);
float min_cost = (posdata->cost + ZopfliCostModelGetMinCostCmd(model) +
ZopfliCostModelGetLiteralCosts(model, posdata->pos, pos));
min_len = ComputeMinimumCopyLength(min_cost, nodes, num_bytes, pos);
}
/* Go over the command starting positions in order of increasing cost
difference. */
for (k = 0; k < max_iters && k < StartPosQueueSize(queue); ++k) {
const PosData* posdata = StartPosQueueAt(queue, k);
const size_t start = posdata->pos;
const uint16_t inscode = GetInsertLengthCode(pos - start);
const float start_costdiff = posdata->costdiff;
const float base_cost = start_costdiff + (float)GetInsertExtra(inscode) +
ZopfliCostModelGetLiteralCosts(model, 0, pos);
/* Look for last distance matches using the distance cache from this
starting position. */
size_t best_len = min_len - 1;
size_t j = 0;
for (; j < BROTLI_NUM_DISTANCE_SHORT_CODES && best_len < max_len; ++j) {
const size_t idx = kDistanceCacheIndex[j];
const size_t backward =
(size_t)(posdata->distance_cache[idx] + kDistanceCacheOffset[j]);
size_t prev_ix = cur_ix - backward;
size_t len = 0;
uint8_t continuation = ringbuffer[cur_ix_masked + best_len];
if (cur_ix_masked + best_len > ringbuffer_mask) {
break;
}
if (BROTLI_PREDICT_FALSE(backward > max_distance + gap)) {
continue;
}
if (backward <= max_distance) {
if (prev_ix >= cur_ix) {
continue;
}
prev_ix &= ringbuffer_mask;
if (prev_ix + best_len > ringbuffer_mask ||
continuation != ringbuffer[prev_ix + best_len]) {
continue;
}
len = FindMatchLengthWithLimit(&ringbuffer[prev_ix],
&ringbuffer[cur_ix_masked],
max_len);
} else {
continue;
}
{
const float dist_cost = base_cost +
ZopfliCostModelGetDistanceCost(model, j);
size_t l;
for (l = best_len + 1; l <= len; ++l) {
const uint16_t copycode = GetCopyLengthCode(l);
const uint16_t cmdcode =
CombineLengthCodes(inscode, copycode, j == 0);
const float cost = (cmdcode < 128 ? base_cost : dist_cost) +
(float)GetCopyExtra(copycode) +
ZopfliCostModelGetCommandCost(model, cmdcode);
if (cost < nodes[pos + l].u.cost) {
UpdateZopfliNode(nodes, pos, start, l, l, backward, j + 1, cost);
result = BROTLI_MAX(size_t, result, l);
}
best_len = l;
}
}
}
/* At higher iterations look only for new last distance matches, since
looking only for new command start positions with the same distances
does not help much. */
if (k >= 2) continue;
{
/* Loop through all possible copy lengths at this position. */
size_t len = min_len;
for (j = 0; j < num_matches; ++j) {
BackwardMatch match = matches[j];
size_t dist = match.distance;
BROTLI_BOOL is_dictionary_match =
TO_BROTLI_BOOL(dist > max_distance + gap);
/* We already tried all possible last distance matches, so we can use
normal distance code here. */
size_t dist_code = dist + BROTLI_NUM_DISTANCE_SHORT_CODES - 1;
uint16_t dist_symbol;
uint32_t distextra;
uint32_t distnumextra;
float dist_cost;
size_t max_match_len;
PrefixEncodeCopyDistance(dist_code, 0, 0, &dist_symbol, &distextra);
distnumextra = distextra >> 24;
dist_cost = base_cost + (float)distnumextra +
ZopfliCostModelGetDistanceCost(model, dist_symbol);
/* Try all copy lengths up until the maximum copy length corresponding
to this distance. If the distance refers to the static dictionary, or
the maximum length is long enough, try only one maximum length. */
max_match_len = BackwardMatchLength(&match);
if (len < max_match_len &&
(is_dictionary_match || max_match_len > max_zopfli_len)) {
len = max_match_len;
}
for (; len <= max_match_len; ++len) {
const size_t len_code =
is_dictionary_match ? BackwardMatchLengthCode(&match) : len;
const uint16_t copycode = GetCopyLengthCode(len_code);
const uint16_t cmdcode = CombineLengthCodes(inscode, copycode, 0);
const float cost = dist_cost + (float)GetCopyExtra(copycode) +
ZopfliCostModelGetCommandCost(model, cmdcode);
if (cost < nodes[pos + len].u.cost) {
UpdateZopfliNode(nodes, pos, start, len, len_code, dist, 0, cost);
result = BROTLI_MAX(size_t, result, len);
}
}
}
}
}
return result;
}
static size_t ComputeShortestPathFromNodes(size_t num_bytes,
ZopfliNode* nodes) {
size_t index = num_bytes;
size_t num_commands = 0;
while (nodes[index].insert_length == 0 && nodes[index].length == 1) --index;
nodes[index].u.next = BROTLI_UINT32_MAX;
while (index != 0) {
size_t len = ZopfliNodeCommandLength(&nodes[index]);
index -= len;
nodes[index].u.next = (uint32_t)len;
num_commands++;
}
return num_commands;
}
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
void BrotliZopfliCreateCommands(const size_t num_bytes,
const size_t block_start,
const size_t max_backward_limit,
const ZopfliNode* nodes,
int* dist_cache,
size_t* last_insert_len,
const BrotliEncoderParams* params,
Command* commands,
size_t* num_literals) {
size_t pos = 0;
uint32_t offset = nodes[0].u.next;
size_t i;
size_t gap = 0;
BROTLI_UNUSED(params);
for (i = 0; offset != BROTLI_UINT32_MAX; i++) {
const ZopfliNode* next = &nodes[pos + offset];
size_t copy_length = ZopfliNodeCopyLength(next);
size_t insert_length = next->insert_length;
pos += insert_length;
offset = next->u.next;
if (i == 0) {
insert_length += *last_insert_len;
*last_insert_len = 0;
}
{
size_t distance = ZopfliNodeCopyDistance(next);
size_t len_code = ZopfliNodeLengthCode(next);
size_t max_distance =
BROTLI_MIN(size_t, block_start + pos, max_backward_limit);
BROTLI_BOOL is_dictionary = TO_BROTLI_BOOL(distance > max_distance + gap);
size_t dist_code = ZopfliNodeDistanceCode(next);
InitCommand(&commands[i], insert_length,
copy_length, (int)len_code - (int)copy_length, dist_code);
if (!is_dictionary && dist_code > 0) {
dist_cache[3] = dist_cache[2];
dist_cache[2] = dist_cache[1];
dist_cache[1] = dist_cache[0];
dist_cache[0] = (int)distance;
}
}
*num_literals += insert_length;
pos += copy_length;
}
*last_insert_len += num_bytes - pos;
}
static size_t ZopfliIterate(size_t num_bytes,
size_t position,
const uint8_t* ringbuffer,
size_t ringbuffer_mask,
const BrotliEncoderParams* params,
const size_t max_backward_limit,
const size_t gap,
const int* dist_cache,
const ZopfliCostModel* model,
const uint32_t* num_matches,
const BackwardMatch* matches,
ZopfliNode* nodes) {
const size_t max_zopfli_len = MaxZopfliLen(params);
StartPosQueue queue;
size_t cur_match_pos = 0;
size_t i;
nodes[0].length = 0;
nodes[0].u.cost = 0;
InitStartPosQueue(&queue);
for (i = 0; i + 3 < num_bytes; i++) {
size_t skip = UpdateNodes(num_bytes, position, i, ringbuffer,
ringbuffer_mask, params, max_backward_limit, dist_cache,
num_matches[i], &matches[cur_match_pos], model, &queue, nodes);
if (skip < BROTLI_LONG_COPY_QUICK_STEP) skip = 0;
cur_match_pos += num_matches[i];
if (num_matches[i] == 1 &&
BackwardMatchLength(&matches[cur_match_pos - 1]) > max_zopfli_len) {
skip = BROTLI_MAX(size_t,
BackwardMatchLength(&matches[cur_match_pos - 1]), skip);
}
if (skip > 1) {
skip--;
while (skip) {
i++;
if (i + 3 >= num_bytes) break;
EvaluateNode(position, i, max_backward_limit, gap, dist_cache, model,
&queue, nodes);
cur_match_pos += num_matches[i];
skip--;
}
}
}
return ComputeShortestPathFromNodes(num_bytes, nodes);
}
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
size_t BrotliZopfliComputeShortestPath(MemoryManager* m,
const BrotliDictionary* dictionary,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ringbuffer_mask, const BrotliEncoderParams* params,
const size_t max_backward_limit, const int* dist_cache, HasherHandle hasher,
ZopfliNode* nodes) {
const size_t max_zopfli_len = MaxZopfliLen(params);
ZopfliCostModel model;
StartPosQueue queue;
BackwardMatch matches[2 * (MAX_NUM_MATCHES_H10 + 64)];
const size_t store_end = num_bytes >= StoreLookaheadH10() ?
position + num_bytes - StoreLookaheadH10() + 1 : position;
size_t i;
size_t gap = 0;
size_t lz_matches_offset = 0;
nodes[0].length = 0;
nodes[0].u.cost = 0;
InitZopfliCostModel(m, &model, num_bytes);
if (BROTLI_IS_OOM(m)) return 0;
ZopfliCostModelSetFromLiteralCosts(
&model, position, ringbuffer, ringbuffer_mask);
InitStartPosQueue(&queue);
for (i = 0; i + HashTypeLengthH10() - 1 < num_bytes; i++) {
const size_t pos = position + i;
const size_t max_distance = BROTLI_MIN(size_t, pos, max_backward_limit);
size_t skip;
size_t num_matches = FindAllMatchesH10(hasher, dictionary, ringbuffer,
ringbuffer_mask, pos, num_bytes - i, max_distance, gap, params,
&matches[lz_matches_offset]);
if (num_matches > 0 &&
BackwardMatchLength(&matches[num_matches - 1]) > max_zopfli_len) {
matches[0] = matches[num_matches - 1];
num_matches = 1;
}
skip = UpdateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask,
params, max_backward_limit, dist_cache, num_matches, matches, &model,
&queue, nodes);
if (skip < BROTLI_LONG_COPY_QUICK_STEP) skip = 0;
if (num_matches == 1 && BackwardMatchLength(&matches[0]) > max_zopfli_len) {
skip = BROTLI_MAX(size_t, BackwardMatchLength(&matches[0]), skip);
}
if (skip > 1) {
/* Add the tail of the copy to the hasher. */
StoreRangeH10(hasher, ringbuffer, ringbuffer_mask, pos + 1, BROTLI_MIN(
size_t, pos + skip, store_end));
skip--;
while (skip) {
i++;
if (i + HashTypeLengthH10() - 1 >= num_bytes) break;
EvaluateNode(position, i, max_backward_limit, gap, dist_cache, &model,
&queue, nodes);
skip--;
}
}
}
CleanupZopfliCostModel(m, &model);
return ComputeShortestPathFromNodes(num_bytes, nodes);
}
void BrotliCreateZopfliBackwardReferences(MemoryManager* m,
const BrotliDictionary* dictionary,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ringbuffer_mask, const BrotliEncoderParams* params,
HasherHandle hasher, int* dist_cache, size_t* last_insert_len,
Command* commands, size_t* num_commands, size_t* num_literals) {
const size_t max_backward_limit = BROTLI_MAX_BACKWARD_LIMIT(params->lgwin);
ZopfliNode* nodes;
nodes = BROTLI_ALLOC(m, ZopfliNode, num_bytes + 1);
if (BROTLI_IS_OOM(m)) return;
BrotliInitZopfliNodes(nodes, num_bytes + 1);
*num_commands += BrotliZopfliComputeShortestPath(m, dictionary,
num_bytes, position, ringbuffer, ringbuffer_mask,
params, max_backward_limit, dist_cache, hasher, nodes);
if (BROTLI_IS_OOM(m)) return;
BrotliZopfliCreateCommands(num_bytes, position, max_backward_limit, nodes,
dist_cache, last_insert_len, params, commands, num_literals);
BROTLI_FREE(m, nodes);
}
void BrotliCreateHqZopfliBackwardReferences(MemoryManager* m,
const BrotliDictionary* dictionary,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ringbuffer_mask, const BrotliEncoderParams* params,
HasherHandle hasher, int* dist_cache, size_t* last_insert_len,
Command* commands, size_t* num_commands, size_t* num_literals) {
const size_t max_backward_limit = BROTLI_MAX_BACKWARD_LIMIT(params->lgwin);
uint32_t* num_matches = BROTLI_ALLOC(m, uint32_t, num_bytes);
size_t matches_size = 4 * num_bytes;
const size_t store_end = num_bytes >= StoreLookaheadH10() ?
position + num_bytes - StoreLookaheadH10() + 1 : position;
size_t cur_match_pos = 0;
size_t i;
size_t orig_num_literals;
size_t orig_last_insert_len;
int orig_dist_cache[4];
size_t orig_num_commands;
ZopfliCostModel model;
ZopfliNode* nodes;
BackwardMatch* matches = BROTLI_ALLOC(m, BackwardMatch, matches_size);
size_t gap = 0;
size_t shadow_matches = 0;
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i + HashTypeLengthH10() - 1 < num_bytes; ++i) {
const size_t pos = position + i;
size_t max_distance = BROTLI_MIN(size_t, pos, max_backward_limit);
size_t max_length = num_bytes - i;
size_t num_found_matches;
size_t cur_match_end;
size_t j;
/* Ensure that we have enough free slots. */
BROTLI_ENSURE_CAPACITY(m, BackwardMatch, matches, matches_size,
cur_match_pos + MAX_NUM_MATCHES_H10 + shadow_matches);
if (BROTLI_IS_OOM(m)) return;
num_found_matches = FindAllMatchesH10(hasher, dictionary,
ringbuffer, ringbuffer_mask, pos, max_length,
max_distance, gap, params, &matches[cur_match_pos + shadow_matches]);
cur_match_end = cur_match_pos + num_found_matches;
for (j = cur_match_pos; j + 1 < cur_match_end; ++j) {
BROTLI_DCHECK(BackwardMatchLength(&matches[j]) <=
BackwardMatchLength(&matches[j + 1]));
}
num_matches[i] = (uint32_t)num_found_matches;
if (num_found_matches > 0) {
const size_t match_len = BackwardMatchLength(&matches[cur_match_end - 1]);
if (match_len > MAX_ZOPFLI_LEN_QUALITY_11) {
const size_t skip = match_len - 1;
matches[cur_match_pos++] = matches[cur_match_end - 1];
num_matches[i] = 1;
/* Add the tail of the copy to the hasher. */
StoreRangeH10(hasher, ringbuffer, ringbuffer_mask, pos + 1,
BROTLI_MIN(size_t, pos + match_len, store_end));
memset(&num_matches[i + 1], 0, skip * sizeof(num_matches[0]));
i += skip;
} else {
cur_match_pos = cur_match_end;
}
}
}
orig_num_literals = *num_literals;
orig_last_insert_len = *last_insert_len;
memcpy(orig_dist_cache, dist_cache, 4 * sizeof(dist_cache[0]));
orig_num_commands = *num_commands;
nodes = BROTLI_ALLOC(m, ZopfliNode, num_bytes + 1);
if (BROTLI_IS_OOM(m)) return;
InitZopfliCostModel(m, &model, num_bytes);
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < 2; i++) {
BrotliInitZopfliNodes(nodes, num_bytes + 1);
if (i == 0) {
ZopfliCostModelSetFromLiteralCosts(
&model, position, ringbuffer, ringbuffer_mask);
} else {
ZopfliCostModelSetFromCommands(&model, position, ringbuffer,
ringbuffer_mask, commands, *num_commands - orig_num_commands,
orig_last_insert_len);
}
*num_commands = orig_num_commands;
*num_literals = orig_num_literals;
*last_insert_len = orig_last_insert_len;
memcpy(dist_cache, orig_dist_cache, 4 * sizeof(dist_cache[0]));
*num_commands += ZopfliIterate(num_bytes, position, ringbuffer,
ringbuffer_mask, params, max_backward_limit, gap, dist_cache,
&model, num_matches, matches, nodes);
BrotliZopfliCreateCommands(num_bytes, position, max_backward_limit,
nodes, dist_cache, last_insert_len, params, commands, num_literals);
}
CleanupZopfliCostModel(m, &model);
BROTLI_FREE(m, nodes);
BROTLI_FREE(m, matches);
BROTLI_FREE(m, num_matches);
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif


@ -1,97 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function to find backward reference copies. */
#ifndef BROTLI_ENC_BACKWARD_REFERENCES_HQ_H_
#define BROTLI_ENC_BACKWARD_REFERENCES_HQ_H_
#include "../common/constants.h"
#include "../common/dictionary.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./command.h"
#include "./hash.h"
#include "./memory.h"
#include "./quality.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
BROTLI_INTERNAL void BrotliCreateZopfliBackwardReferences(MemoryManager* m,
const BrotliDictionary* dictionary,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ringbuffer_mask, const BrotliEncoderParams* params,
HasherHandle hasher, int* dist_cache, size_t* last_insert_len,
Command* commands, size_t* num_commands, size_t* num_literals);
BROTLI_INTERNAL void BrotliCreateHqZopfliBackwardReferences(MemoryManager* m,
const BrotliDictionary* dictionary,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ringbuffer_mask, const BrotliEncoderParams* params,
HasherHandle hasher, int* dist_cache, size_t* last_insert_len,
Command* commands, size_t* num_commands, size_t* num_literals);
typedef struct ZopfliNode {
/* best length to get up to this byte (not including this byte itself);
the highest 8 bits are used to reconstruct the length code */
uint32_t length;
/* distance associated with the length; highest 5 bits contain distance
short code + 1 (or zero if no short code); this way only distances shorter
than 128MiB are allowed here */
uint32_t distance;
/* number of literal inserts before this copy */
uint32_t insert_length;
/* This union holds information used by dynamic programming. During the
forward pass |cost| is used to store the goal function. When a node is
processed, its |cost| is invalidated in favor of |shortcut|. On the path
back-tracing pass |next| is assigned the offset to the next node on the
path. */
union {
/* Smallest cost to get to this byte from the beginning, as found so far. */
float cost;
/* Offset to the next node on the path. Equal to command_length() of the
next node on the path. For the last node it equals BROTLI_UINT32_MAX. */
uint32_t next;
/* Node position that provides next distance for distance cache. */
uint32_t shortcut;
} u;
} ZopfliNode;
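/* Illustration (not part of the header): unpacking the |distance| field as
   described in the comment above. The Sketch* names are hypothetical and only
   mirror that comment; they are not the library's accessors. */
static BROTLI_INLINE uint32_t SketchNodeCopyDistance(const ZopfliNode* n) {
  return n->distance & ((1u << 27) - 1);  /* low 27 bits: distance < 128MiB */
}
static BROTLI_INLINE uint32_t SketchNodeDistanceShortCode(const ZopfliNode* n) {
  return n->distance >> 27;  /* top 5 bits: distance short code + 1, 0 = none */
}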
BROTLI_INTERNAL void BrotliInitZopfliNodes(ZopfliNode* array, size_t length);
/* Computes the shortest path of commands from position to at most
position + num_bytes.
The return value is the number of commands found, and the length of each
command on the path (copy length plus insert length) can be recovered from
the nodes array.
Note that the sum of the lengths of all commands can be less than num_bytes.
On return, the nodes[0..num_bytes] array will have the following
"ZopfliNode array invariant":
For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then
(1) nodes[i].copy_length() >= 2
(2) nodes[i].command_length() <= i and
(3) nodes[i - nodes[i].command_length()].cost < kInfinity */
BROTLI_INTERNAL size_t BrotliZopfliComputeShortestPath(MemoryManager* m,
const BrotliDictionary* dictionary,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ringbuffer_mask, const BrotliEncoderParams* params,
const size_t max_backward_limit, const int* dist_cache, HasherHandle hasher,
ZopfliNode* nodes);
BROTLI_INTERNAL void BrotliZopfliCreateCommands(
const size_t num_bytes, const size_t block_start,
const size_t max_backward_limit, const ZopfliNode* nodes,
int* dist_cache, size_t* last_insert_len, const BrotliEncoderParams* params,
Command* commands, size_t* num_literals);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_BACKWARD_REFERENCES_HQ_H_ */


@ -1,154 +0,0 @@
/* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: EXPORT_FN, FN */
static BROTLI_NOINLINE void EXPORT_FN(CreateBackwardReferences)(
const BrotliDictionary* dictionary,
const uint16_t* dictionary_hash,
size_t num_bytes, size_t position,
const uint8_t* ringbuffer, size_t ringbuffer_mask,
const BrotliEncoderParams* params, HasherHandle hasher, int* dist_cache,
size_t* last_insert_len, Command* commands, size_t* num_commands,
size_t* num_literals) {
/* Set maximum distance, see section 9.1. of the spec. */
const size_t max_backward_limit = BROTLI_MAX_BACKWARD_LIMIT(params->lgwin);
const Command* const orig_commands = commands;
size_t insert_length = *last_insert_len;
const size_t pos_end = position + num_bytes;
const size_t store_end = num_bytes >= FN(StoreLookahead)() ?
position + num_bytes - FN(StoreLookahead)() + 1 : position;
/* Speed-up heuristics for random data. */
const size_t random_heuristics_window_size =
LiteralSpreeLengthForSparseSearch(params);
size_t apply_random_heuristics = position + random_heuristics_window_size;
const size_t gap = 0;
/* Minimum score to accept a backward reference. */
const score_t kMinScore = BROTLI_SCORE_BASE + 100;
FN(PrepareDistanceCache)(hasher, dist_cache);
while (position + FN(HashTypeLength)() < pos_end) {
size_t max_length = pos_end - position;
size_t max_distance = BROTLI_MIN(size_t, position, max_backward_limit);
HasherSearchResult sr;
sr.len = 0;
sr.len_code_delta = 0;
sr.distance = 0;
sr.score = kMinScore;
FN(FindLongestMatch)(hasher, dictionary, dictionary_hash, ringbuffer,
ringbuffer_mask, dist_cache, position,
max_length, max_distance, gap, &sr);
if (sr.score > kMinScore) {
/* Found a match. Let's look for something even better ahead. */
int delayed_backward_references_in_row = 0;
--max_length;
for (;; --max_length) {
const score_t cost_diff_lazy = 175;
HasherSearchResult sr2;
sr2.len = params->quality < MIN_QUALITY_FOR_EXTENSIVE_REFERENCE_SEARCH ?
BROTLI_MIN(size_t, sr.len - 1, max_length) : 0;
sr2.len_code_delta = 0;
sr2.distance = 0;
sr2.score = kMinScore;
max_distance = BROTLI_MIN(size_t, position + 1, max_backward_limit);
FN(FindLongestMatch)(hasher, dictionary, dictionary_hash,
ringbuffer, ringbuffer_mask, dist_cache, position + 1, max_length,
max_distance, gap, &sr2);
if (sr2.score >= sr.score + cost_diff_lazy) {
/* Ok, let's just write one byte for now and start a match from the
next byte. */
++position;
++insert_length;
sr = sr2;
if (++delayed_backward_references_in_row < 4 &&
position + FN(HashTypeLength)() < pos_end) {
continue;
}
}
break;
}
apply_random_heuristics =
position + 2 * sr.len + random_heuristics_window_size;
max_distance = BROTLI_MIN(size_t, position, max_backward_limit);
{
/* The first 16 codes are special short-codes,
and the minimum offset is 1. */
size_t distance_code =
ComputeDistanceCode(sr.distance, max_distance + gap, dist_cache);
if ((sr.distance <= (max_distance + gap)) && distance_code > 0) {
dist_cache[3] = dist_cache[2];
dist_cache[2] = dist_cache[1];
dist_cache[1] = dist_cache[0];
dist_cache[0] = (int)sr.distance;
FN(PrepareDistanceCache)(hasher, dist_cache);
}
InitCommand(commands++, insert_length, sr.len, sr.len_code_delta,
distance_code);
}
*num_literals += insert_length;
insert_length = 0;
/* Put the hash keys into the table, if there are enough bytes left.
Depending on the hasher implementation, it can push all positions
in the given range or only a subset of them.
Avoid hash poisoning with RLE data. */
{
size_t range_start = position + 2;
size_t range_end = BROTLI_MIN(size_t, position + sr.len, store_end);
if (sr.distance < (sr.len >> 2)) {
range_start = BROTLI_MIN(size_t, range_end, BROTLI_MAX(size_t,
range_start, position + sr.len - (sr.distance << 2)));
}
FN(StoreRange)(hasher, ringbuffer, ringbuffer_mask, range_start,
range_end);
}
position += sr.len;
} else {
++insert_length;
++position;
/* If we have not seen matches for a long time, we can skip some
match lookups. Unsuccessful match lookups are very expensive,
and this kind of heuristic speeds up compression quite a lot. */
if (position > apply_random_heuristics) {
/* Going through uncompressible data, jump. */
if (position >
apply_random_heuristics + 4 * random_heuristics_window_size) {
/* It has been quite a long time since we saw a copy, so we
assume that this data is not compressible, and store hashes
less often. Hashes of incompressible data are less likely to
turn out to be useful in the future, too, so we store fewer of
them, so as not to flood out the hash table entries of good,
compressible data. */
const size_t kMargin =
BROTLI_MAX(size_t, FN(StoreLookahead)() - 1, 4);
size_t pos_jump =
BROTLI_MIN(size_t, position + 16, pos_end - kMargin);
for (; position < pos_jump; position += 4) {
FN(Store)(hasher, ringbuffer, ringbuffer_mask, position);
insert_length += 4;
}
} else {
const size_t kMargin =
BROTLI_MAX(size_t, FN(StoreLookahead)() - 1, 2);
size_t pos_jump =
BROTLI_MIN(size_t, position + 8, pos_end - kMargin);
for (; position < pos_jump; position += 2) {
FN(Store)(hasher, ringbuffer, ringbuffer_mask, position);
insert_length += 2;
}
}
}
}
}
insert_length += pos_end - position;
*last_insert_len = insert_length;
*num_commands += (size_t)(commands - orig_commands);
}
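A minimal sketch of the delayed-match decision used in the loop above, with hypothetical names and the score treated as a plain integer; the real loop also caps consecutive delays at four.
#include <stddef.h>
#include <stdint.h>
/* Delay by one literal only when the match at position + 1 beats the current
   one by at least cost_diff_lazy (175), as in the loop above. */
typedef struct { size_t len; uint64_t score; } SketchMatch;
static int SketchPreferLaterMatch(SketchMatch cur, SketchMatch next) {
  const uint64_t cost_diff_lazy = 175;
  return next.score >= cur.score + cost_diff_lazy;  /* 1 = emit a literal, retry */
}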


@ -1,35 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions to estimate the bit cost of Huffman trees. */
#include "./bit_cost.h"
#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./fast_log.h"
#include "./histogram.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define FN(X) X ## Literal
#include "./bit_cost_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Command
#include "./bit_cost_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Distance
#include "./bit_cost_inc.h" /* NOLINT(build/include) */
#undef FN
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif


@ -1,63 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions to estimate the bit cost of Huffman trees. */
#ifndef BROTLI_ENC_BIT_COST_H_
#define BROTLI_ENC_BIT_COST_H_
#include "../common/platform.h"
#include <brotli/types.h>
#include "./fast_log.h"
#include "./histogram.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static BROTLI_INLINE double ShannonEntropy(const uint32_t *population,
size_t size, size_t *total) {
size_t sum = 0;
double retval = 0;
const uint32_t *population_end = population + size;
size_t p;
if (size & 1) {
goto odd_number_of_elements_left;
}
while (population < population_end) {
p = *population++;
sum += p;
retval -= (double)p * FastLog2(p);
odd_number_of_elements_left:
p = *population++;
sum += p;
retval -= (double)p * FastLog2(p);
}
if (sum) retval += (double)sum * FastLog2(sum);
*total = sum;
return retval;
}
static BROTLI_INLINE double BitsEntropy(
const uint32_t *population, size_t size) {
size_t sum;
double retval = ShannonEntropy(population, size, &sum);
if (retval < sum) {
/* At least one bit per literal is needed. */
retval = (double)sum;
}
return retval;
}
BROTLI_INTERNAL double BrotliPopulationCostLiteral(const HistogramLiteral*);
BROTLI_INTERNAL double BrotliPopulationCostCommand(const HistogramCommand*);
BROTLI_INTERNAL double BrotliPopulationCostDistance(const HistogramDistance*);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_BIT_COST_H_ */
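A quick sanity check on the estimate above: for a toy histogram {3, 1}, the Shannon entropy is 4·log2(4) − (3·log2(3) + 1·log2(1)) ≈ 3.25 bits, and BitsEntropy would round that up to 4 bits (at least one bit per symbol). A standalone sketch, using libm's log2 in place of FastLog2 and skipping the loop-unrolling trick:
#include <math.h>
#include <stdint.h>
#include <stddef.h>

/* Standalone sketch, not the library routine: same entropy estimate. */
static double SketchShannonEntropy(const uint32_t* population, size_t size,
                                   size_t* total) {
  double bits = 0.0;
  size_t sum = 0, i;
  for (i = 0; i < size; ++i) {
    if (population[i] == 0) continue;
    sum += population[i];
    bits -= (double)population[i] * log2((double)population[i]);
  }
  if (sum) bits += (double)sum * log2((double)sum);
  *total = sum;
  return bits;  /* {3, 1} -> ~3.245; BitsEntropy would clamp this to 4.0 */
}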


@ -1,127 +0,0 @@
/* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN */
#define HistogramType FN(Histogram)
double FN(BrotliPopulationCost)(const HistogramType* histogram) {
static const double kOneSymbolHistogramCost = 12;
static const double kTwoSymbolHistogramCost = 20;
static const double kThreeSymbolHistogramCost = 28;
static const double kFourSymbolHistogramCost = 37;
const size_t data_size = FN(HistogramDataSize)();
int count = 0;
size_t s[5];
double bits = 0.0;
size_t i;
if (histogram->total_count_ == 0) {
return kOneSymbolHistogramCost;
}
for (i = 0; i < data_size; ++i) {
if (histogram->data_[i] > 0) {
s[count] = i;
++count;
if (count > 4) break;
}
}
if (count == 1) {
return kOneSymbolHistogramCost;
}
if (count == 2) {
return (kTwoSymbolHistogramCost + (double)histogram->total_count_);
}
if (count == 3) {
const uint32_t histo0 = histogram->data_[s[0]];
const uint32_t histo1 = histogram->data_[s[1]];
const uint32_t histo2 = histogram->data_[s[2]];
const uint32_t histomax =
BROTLI_MAX(uint32_t, histo0, BROTLI_MAX(uint32_t, histo1, histo2));
return (kThreeSymbolHistogramCost +
2 * (histo0 + histo1 + histo2) - histomax);
}
if (count == 4) {
uint32_t histo[4];
uint32_t h23;
uint32_t histomax;
for (i = 0; i < 4; ++i) {
histo[i] = histogram->data_[s[i]];
}
/* Sort */
for (i = 0; i < 4; ++i) {
size_t j;
for (j = i + 1; j < 4; ++j) {
if (histo[j] > histo[i]) {
BROTLI_SWAP(uint32_t, histo, j, i);
}
}
}
h23 = histo[2] + histo[3];
histomax = BROTLI_MAX(uint32_t, h23, histo[0]);
return (kFourSymbolHistogramCost +
3 * h23 + 2 * (histo[0] + histo[1]) - histomax);
}
{
/* In this loop we compute the entropy of the histogram and simultaneously
build a simplified histogram of the code length codes where we use the
zero repeat code 17, but we don't use the non-zero repeat code 16. */
size_t max_depth = 1;
uint32_t depth_histo[BROTLI_CODE_LENGTH_CODES] = { 0 };
const double log2total = FastLog2(histogram->total_count_);
for (i = 0; i < data_size;) {
if (histogram->data_[i] > 0) {
/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count)
= log2(total_count) - log2(count(symbol)) */
double log2p = log2total - FastLog2(histogram->data_[i]);
/* Approximate the bit depth by round(-log2(P(symbol))) */
size_t depth = (size_t)(log2p + 0.5);
bits += histogram->data_[i] * log2p;
if (depth > 15) {
depth = 15;
}
if (depth > max_depth) {
max_depth = depth;
}
++depth_histo[depth];
++i;
} else {
/* Compute the run length of zeros and add the appropriate number of 0
and 17 code length codes to the code length code histogram. */
uint32_t reps = 1;
size_t k;
for (k = i + 1; k < data_size && histogram->data_[k] == 0; ++k) {
++reps;
}
i += reps;
if (i == data_size) {
/* Don't add any cost for the last zero run, since these are encoded
only implicitly. */
break;
}
if (reps < 3) {
depth_histo[0] += reps;
} else {
reps -= 2;
while (reps > 0) {
++depth_histo[BROTLI_REPEAT_ZERO_CODE_LENGTH];
/* Add the 3 extra bits for the 17 code length code. */
bits += 3;
reps >>= 3;
}
}
}
}
/* Add the estimated encoding cost of the code length code histogram. */
bits += (double)(18 + 2 * max_depth);
/* Add the entropy of the code length code histogram. */
bits += BitsEntropy(depth_histo, BROTLI_CODE_LENGTH_CODES);
}
return bits;
}
#undef HistogramType
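For intuition about the small-histogram shortcuts above, here is the three-symbol case evaluated by hand for counts {5, 3, 2}: kThreeSymbolHistogramCost + 2·(5+3+2) − max = 28 + 20 − 5 = 43 bits. A minimal sketch with the constant copied from the template and hypothetical names:
#include <stdint.h>

static double SketchThreeSymbolCost(uint32_t h0, uint32_t h1, uint32_t h2) {
  const double kThreeSymbolHistogramCost = 28;  /* as in the template above */
  uint32_t histomax = h0 > h1 ? h0 : h1;
  if (h2 > histomax) histomax = h2;
  return kThreeSymbolHistogramCost + 2.0 * (h0 + h1 + h2) - histomax;
}
/* SketchThreeSymbolCost(5, 3, 2) == 28 + 20 - 5 == 43 */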


@ -1,33 +0,0 @@
/* NOLINT(build/header_guard) */
/* Copyright 2014 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN */
#define HistogramType FN(Histogram)
/* Creates entropy codes for all block types and stores them to the bit
stream. */
static void FN(BuildAndStoreEntropyCodes)(MemoryManager* m, BlockEncoder* self,
const HistogramType* histograms, const size_t histograms_size,
HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) {
const size_t alphabet_size = self->alphabet_size_;
const size_t table_size = histograms_size * alphabet_size;
self->depths_ = BROTLI_ALLOC(m, uint8_t, table_size);
self->bits_ = BROTLI_ALLOC(m, uint16_t, table_size);
if (BROTLI_IS_OOM(m)) return;
{
size_t i;
for (i = 0; i < histograms_size; ++i) {
size_t ix = i * alphabet_size;
BuildAndStoreHuffmanTree(&histograms[i].data_[0], alphabet_size, tree,
&self->depths_[ix], &self->bits_[ix], storage_ix, storage);
}
}
}
#undef HistogramType


@ -1,194 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Block split point selection utilities. */
#include "./block_splitter.h"
#include <string.h> /* memcpy, memset */
#include "../common/platform.h"
#include "./bit_cost.h"
#include "./cluster.h"
#include "./command.h"
#include "./fast_log.h"
#include "./histogram.h"
#include "./memory.h"
#include "./quality.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static const size_t kMaxLiteralHistograms = 100;
static const size_t kMaxCommandHistograms = 50;
static const double kLiteralBlockSwitchCost = 28.1;
static const double kCommandBlockSwitchCost = 13.5;
static const double kDistanceBlockSwitchCost = 14.6;
static const size_t kLiteralStrideLength = 70;
static const size_t kCommandStrideLength = 40;
static const size_t kSymbolsPerLiteralHistogram = 544;
static const size_t kSymbolsPerCommandHistogram = 530;
static const size_t kSymbolsPerDistanceHistogram = 544;
static const size_t kMinLengthForBlockSplitting = 128;
static const size_t kIterMulForRefining = 2;
static const size_t kMinItersForRefining = 100;
static size_t CountLiterals(const Command* cmds, const size_t num_commands) {
/* Count how many we have. */
size_t total_length = 0;
size_t i;
for (i = 0; i < num_commands; ++i) {
total_length += cmds[i].insert_len_;
}
return total_length;
}
static void CopyLiteralsToByteArray(const Command* cmds,
const size_t num_commands,
const uint8_t* data,
const size_t offset,
const size_t mask,
uint8_t* literals) {
size_t pos = 0;
size_t from_pos = offset & mask;
size_t i;
for (i = 0; i < num_commands; ++i) {
size_t insert_len = cmds[i].insert_len_;
if (from_pos + insert_len > mask) {
size_t head_size = mask + 1 - from_pos;
memcpy(literals + pos, data + from_pos, head_size);
from_pos = 0;
pos += head_size;
insert_len -= head_size;
}
if (insert_len > 0) {
memcpy(literals + pos, data + from_pos, insert_len);
pos += insert_len;
}
from_pos = (from_pos + insert_len + CommandCopyLen(&cmds[i])) & mask;
}
}
static BROTLI_INLINE uint32_t MyRand(uint32_t* seed) {
/* Initial seed should be 7. In this case, loop length is (1 << 29). */
*seed *= 16807U;
return *seed;
}
static BROTLI_INLINE double BitCost(size_t count) {
return count == 0 ? -2.0 : FastLog2(count);
}
#define HISTOGRAMS_PER_BATCH 64
#define CLUSTERS_PER_BATCH 16
#define FN(X) X ## Literal
#define DataType uint8_t
/* NOLINTNEXTLINE(build/include) */
#include "./block_splitter_inc.h"
#undef DataType
#undef FN
#define FN(X) X ## Command
#define DataType uint16_t
/* NOLINTNEXTLINE(build/include) */
#include "./block_splitter_inc.h"
#undef FN
#define FN(X) X ## Distance
/* NOLINTNEXTLINE(build/include) */
#include "./block_splitter_inc.h"
#undef DataType
#undef FN
void BrotliInitBlockSplit(BlockSplit* self) {
self->num_types = 0;
self->num_blocks = 0;
self->types = 0;
self->lengths = 0;
self->types_alloc_size = 0;
self->lengths_alloc_size = 0;
}
void BrotliDestroyBlockSplit(MemoryManager* m, BlockSplit* self) {
BROTLI_FREE(m, self->types);
BROTLI_FREE(m, self->lengths);
}
void BrotliSplitBlock(MemoryManager* m,
const Command* cmds,
const size_t num_commands,
const uint8_t* data,
const size_t pos,
const size_t mask,
const BrotliEncoderParams* params,
BlockSplit* literal_split,
BlockSplit* insert_and_copy_split,
BlockSplit* dist_split) {
{
size_t literals_count = CountLiterals(cmds, num_commands);
uint8_t* literals = BROTLI_ALLOC(m, uint8_t, literals_count);
if (BROTLI_IS_OOM(m)) return;
/* Create a contiguous array of literals. */
CopyLiteralsToByteArray(cmds, num_commands, data, pos, mask, literals);
/* Create the block split on the array of literals.
Literal histograms have alphabet size 256. */
SplitByteVectorLiteral(
m, literals, literals_count,
kSymbolsPerLiteralHistogram, kMaxLiteralHistograms,
kLiteralStrideLength, kLiteralBlockSwitchCost, params,
literal_split);
if (BROTLI_IS_OOM(m)) return;
BROTLI_FREE(m, literals);
}
{
/* Compute prefix codes for commands. */
uint16_t* insert_and_copy_codes = BROTLI_ALLOC(m, uint16_t, num_commands);
size_t i;
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < num_commands; ++i) {
insert_and_copy_codes[i] = cmds[i].cmd_prefix_;
}
/* Create the block split on the array of command prefixes. */
SplitByteVectorCommand(
m, insert_and_copy_codes, num_commands,
kSymbolsPerCommandHistogram, kMaxCommandHistograms,
kCommandStrideLength, kCommandBlockSwitchCost, params,
insert_and_copy_split);
if (BROTLI_IS_OOM(m)) return;
/* TODO: reuse for distances? */
BROTLI_FREE(m, insert_and_copy_codes);
}
{
/* Create a contiguous array of distance prefixes. */
uint16_t* distance_prefixes = BROTLI_ALLOC(m, uint16_t, num_commands);
size_t j = 0;
size_t i;
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < num_commands; ++i) {
const Command* cmd = &cmds[i];
if (CommandCopyLen(cmd) && cmd->cmd_prefix_ >= 128) {
distance_prefixes[j++] = cmd->dist_prefix_;
}
}
/* Create the block split on the array of distance prefixes. */
SplitByteVectorDistance(
m, distance_prefixes, j,
kSymbolsPerDistanceHistogram, kMaxCommandHistograms,
kCommandStrideLength, kDistanceBlockSwitchCost, params,
dist_split);
if (BROTLI_IS_OOM(m)) return;
BROTLI_FREE(m, distance_prefixes);
}
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif


@ -1,51 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Block split point selection utilities. */
#ifndef BROTLI_ENC_BLOCK_SPLITTER_H_
#define BROTLI_ENC_BLOCK_SPLITTER_H_
#include "../common/platform.h"
#include <brotli/types.h>
#include "./command.h"
#include "./memory.h"
#include "./quality.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
typedef struct BlockSplit {
size_t num_types; /* Number of distinct block types */
size_t num_blocks; /* Number of entries in types and lengths */
uint8_t* types;
uint32_t* lengths;
size_t types_alloc_size;
size_t lengths_alloc_size;
} BlockSplit;
BROTLI_INTERNAL void BrotliInitBlockSplit(BlockSplit* self);
BROTLI_INTERNAL void BrotliDestroyBlockSplit(MemoryManager* m,
BlockSplit* self);
BROTLI_INTERNAL void BrotliSplitBlock(MemoryManager* m,
const Command* cmds,
const size_t num_commands,
const uint8_t* data,
const size_t offset,
const size_t mask,
const BrotliEncoderParams* params,
BlockSplit* literal_split,
BlockSplit* insert_and_copy_split,
BlockSplit* dist_split);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_BLOCK_SPLITTER_H_ */
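The BlockSplit above is a run-length description of block types. A hypothetical helper that walks it (names and the printf formatting are illustrative, not library code):
#include <stdio.h>
#include <stddef.h>

static void SketchPrintBlockSplit(const BlockSplit* split) {
  size_t i, pos = 0;
  for (i = 0; i < split->num_blocks; ++i) {
    printf("block %u: type %u covers symbols [%u, %u)\n",
           (unsigned)i, (unsigned)split->types[i],
           (unsigned)pos, (unsigned)(pos + split->lengths[i]));
    pos += split->lengths[i];
  }
  /* e.g. types = {0, 1, 0}, lengths = {100, 40, 60} means num_blocks = 3,
     num_types = 2, and the middle 40 symbols use block type 1. */
}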


@ -1,431 +0,0 @@
/* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, DataType */
#define HistogramType FN(Histogram)
static void FN(InitialEntropyCodes)(const DataType* data, size_t length,
size_t stride,
size_t num_histograms,
HistogramType* histograms) {
uint32_t seed = 7;
size_t block_length = length / num_histograms;
size_t i;
FN(ClearHistograms)(histograms, num_histograms);
for (i = 0; i < num_histograms; ++i) {
size_t pos = length * i / num_histograms;
if (i != 0) {
pos += MyRand(&seed) % block_length;
}
if (pos + stride >= length) {
pos = length - stride - 1;
}
FN(HistogramAddVector)(&histograms[i], data + pos, stride);
}
}
static void FN(RandomSample)(uint32_t* seed,
const DataType* data,
size_t length,
size_t stride,
HistogramType* sample) {
size_t pos = 0;
if (stride >= length) {
stride = length;
} else {
pos = MyRand(seed) % (length - stride + 1);
}
FN(HistogramAddVector)(sample, data + pos, stride);
}
static void FN(RefineEntropyCodes)(const DataType* data, size_t length,
size_t stride,
size_t num_histograms,
HistogramType* histograms) {
size_t iters =
kIterMulForRefining * length / stride + kMinItersForRefining;
uint32_t seed = 7;
size_t iter;
iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms;
for (iter = 0; iter < iters; ++iter) {
HistogramType sample;
FN(HistogramClear)(&sample);
FN(RandomSample)(&seed, data, length, stride, &sample);
FN(HistogramAddHistogram)(&histograms[iter % num_histograms], &sample);
}
}
/* Assigns a block id from the range [0, num_histograms) to each data element
in data[0..length) and fills in block_id[0..length) with the assigned values.
Returns the number of blocks, i.e. one plus the number of block switches. */
static size_t FN(FindBlocks)(const DataType* data, const size_t length,
const double block_switch_bitcost,
const size_t num_histograms,
const HistogramType* histograms,
double* insert_cost,
double* cost,
uint8_t* switch_signal,
uint8_t *block_id) {
const size_t data_size = FN(HistogramDataSize)();
const size_t bitmaplen = (num_histograms + 7) >> 3;
size_t num_blocks = 1;
size_t i;
size_t j;
BROTLI_DCHECK(num_histograms <= 256);
if (num_histograms <= 1) {
for (i = 0; i < length; ++i) {
block_id[i] = 0;
}
return 1;
}
memset(insert_cost, 0, sizeof(insert_cost[0]) * data_size * num_histograms);
for (i = 0; i < num_histograms; ++i) {
insert_cost[i] = FastLog2((uint32_t)histograms[i].total_count_);
}
for (i = data_size; i != 0;) {
--i;
for (j = 0; j < num_histograms; ++j) {
insert_cost[i * num_histograms + j] =
insert_cost[j] - BitCost(histograms[j].data_[i]);
}
}
memset(cost, 0, sizeof(cost[0]) * num_histograms);
memset(switch_signal, 0, sizeof(switch_signal[0]) * length * bitmaplen);
/* After each iteration of this loop, cost[k] will contain the difference
between the minimum cost of arriving at the current byte position using
entropy code k, and the minimum cost of arriving at the current byte
position. This difference is capped at the block switch cost, and if it
reaches block switch cost, it means that when we trace back from the last
position, we need to switch here. */
for (i = 0; i < length; ++i) {
const size_t byte_ix = i;
size_t ix = byte_ix * bitmaplen;
size_t insert_cost_ix = data[byte_ix] * num_histograms;
double min_cost = 1e99;
double block_switch_cost = block_switch_bitcost;
size_t k;
for (k = 0; k < num_histograms; ++k) {
/* We are coding the symbol in data[byte_ix] with entropy code k. */
cost[k] += insert_cost[insert_cost_ix + k];
if (cost[k] < min_cost) {
min_cost = cost[k];
block_id[byte_ix] = (uint8_t)k;
}
}
/* More blocks for the beginning. */
if (byte_ix < 2000) {
block_switch_cost *= 0.77 + 0.07 * (double)byte_ix / 2000;
}
for (k = 0; k < num_histograms; ++k) {
cost[k] -= min_cost;
if (cost[k] >= block_switch_cost) {
const uint8_t mask = (uint8_t)(1u << (k & 7));
cost[k] = block_switch_cost;
BROTLI_DCHECK((k >> 3) < bitmaplen);
switch_signal[ix + (k >> 3)] |= mask;
}
}
}
{ /* Trace back from the last position and switch at the marked places. */
size_t byte_ix = length - 1;
size_t ix = byte_ix * bitmaplen;
uint8_t cur_id = block_id[byte_ix];
while (byte_ix > 0) {
const uint8_t mask = (uint8_t)(1u << (cur_id & 7));
BROTLI_DCHECK(((size_t)cur_id >> 3) < bitmaplen);
--byte_ix;
ix -= bitmaplen;
if (switch_signal[ix + (cur_id >> 3)] & mask) {
if (cur_id != block_id[byte_ix]) {
cur_id = block_id[byte_ix];
++num_blocks;
}
}
block_id[byte_ix] = cur_id;
}
}
return num_blocks;
}
static size_t FN(RemapBlockIds)(uint8_t* block_ids, const size_t length,
uint16_t* new_id, const size_t num_histograms) {
static const uint16_t kInvalidId = 256;
uint16_t next_id = 0;
size_t i;
for (i = 0; i < num_histograms; ++i) {
new_id[i] = kInvalidId;
}
for (i = 0; i < length; ++i) {
BROTLI_DCHECK(block_ids[i] < num_histograms);
if (new_id[block_ids[i]] == kInvalidId) {
new_id[block_ids[i]] = next_id++;
}
}
for (i = 0; i < length; ++i) {
block_ids[i] = (uint8_t)new_id[block_ids[i]];
BROTLI_DCHECK(block_ids[i] < num_histograms);
}
BROTLI_DCHECK(next_id <= num_histograms);
return next_id;
}
static void FN(BuildBlockHistograms)(const DataType* data, const size_t length,
const uint8_t* block_ids,
const size_t num_histograms,
HistogramType* histograms) {
size_t i;
FN(ClearHistograms)(histograms, num_histograms);
for (i = 0; i < length; ++i) {
FN(HistogramAdd)(&histograms[block_ids[i]], data[i]);
}
}
static void FN(ClusterBlocks)(MemoryManager* m,
const DataType* data, const size_t length,
const size_t num_blocks,
uint8_t* block_ids,
BlockSplit* split) {
uint32_t* histogram_symbols = BROTLI_ALLOC(m, uint32_t, num_blocks);
uint32_t* block_lengths = BROTLI_ALLOC(m, uint32_t, num_blocks);
const size_t expected_num_clusters = CLUSTERS_PER_BATCH *
(num_blocks + HISTOGRAMS_PER_BATCH - 1) / HISTOGRAMS_PER_BATCH;
size_t all_histograms_size = 0;
size_t all_histograms_capacity = expected_num_clusters;
HistogramType* all_histograms =
BROTLI_ALLOC(m, HistogramType, all_histograms_capacity);
size_t cluster_size_size = 0;
size_t cluster_size_capacity = expected_num_clusters;
uint32_t* cluster_size = BROTLI_ALLOC(m, uint32_t, cluster_size_capacity);
size_t num_clusters = 0;
HistogramType* histograms = BROTLI_ALLOC(m, HistogramType,
BROTLI_MIN(size_t, num_blocks, HISTOGRAMS_PER_BATCH));
size_t max_num_pairs =
HISTOGRAMS_PER_BATCH * HISTOGRAMS_PER_BATCH / 2;
size_t pairs_capacity = max_num_pairs + 1;
HistogramPair* pairs = BROTLI_ALLOC(m, HistogramPair, pairs_capacity);
size_t pos = 0;
uint32_t* clusters;
size_t num_final_clusters;
static const uint32_t kInvalidIndex = BROTLI_UINT32_MAX;
uint32_t* new_index;
size_t i;
uint32_t sizes[HISTOGRAMS_PER_BATCH] = { 0 };
uint32_t new_clusters[HISTOGRAMS_PER_BATCH] = { 0 };
uint32_t symbols[HISTOGRAMS_PER_BATCH] = { 0 };
uint32_t remap[HISTOGRAMS_PER_BATCH] = { 0 };
if (BROTLI_IS_OOM(m)) return;
memset(block_lengths, 0, num_blocks * sizeof(uint32_t));
{
size_t block_idx = 0;
for (i = 0; i < length; ++i) {
BROTLI_DCHECK(block_idx < num_blocks);
++block_lengths[block_idx];
if (i + 1 == length || block_ids[i] != block_ids[i + 1]) {
++block_idx;
}
}
BROTLI_DCHECK(block_idx == num_blocks);
}
for (i = 0; i < num_blocks; i += HISTOGRAMS_PER_BATCH) {
const size_t num_to_combine =
BROTLI_MIN(size_t, num_blocks - i, HISTOGRAMS_PER_BATCH);
size_t num_new_clusters;
size_t j;
for (j = 0; j < num_to_combine; ++j) {
size_t k;
FN(HistogramClear)(&histograms[j]);
for (k = 0; k < block_lengths[i + j]; ++k) {
FN(HistogramAdd)(&histograms[j], data[pos++]);
}
histograms[j].bit_cost_ = FN(BrotliPopulationCost)(&histograms[j]);
new_clusters[j] = (uint32_t)j;
symbols[j] = (uint32_t)j;
sizes[j] = 1;
}
num_new_clusters = FN(BrotliHistogramCombine)(
histograms, sizes, symbols, new_clusters, pairs, num_to_combine,
num_to_combine, HISTOGRAMS_PER_BATCH, max_num_pairs);
BROTLI_ENSURE_CAPACITY(m, HistogramType, all_histograms,
all_histograms_capacity, all_histograms_size + num_new_clusters);
BROTLI_ENSURE_CAPACITY(m, uint32_t, cluster_size,
cluster_size_capacity, cluster_size_size + num_new_clusters);
if (BROTLI_IS_OOM(m)) return;
for (j = 0; j < num_new_clusters; ++j) {
all_histograms[all_histograms_size++] = histograms[new_clusters[j]];
cluster_size[cluster_size_size++] = sizes[new_clusters[j]];
remap[new_clusters[j]] = (uint32_t)j;
}
for (j = 0; j < num_to_combine; ++j) {
histogram_symbols[i + j] = (uint32_t)num_clusters + remap[symbols[j]];
}
num_clusters += num_new_clusters;
BROTLI_DCHECK(num_clusters == cluster_size_size);
BROTLI_DCHECK(num_clusters == all_histograms_size);
}
BROTLI_FREE(m, histograms);
max_num_pairs =
BROTLI_MIN(size_t, 64 * num_clusters, (num_clusters / 2) * num_clusters);
if (pairs_capacity < max_num_pairs + 1) {
BROTLI_FREE(m, pairs);
pairs = BROTLI_ALLOC(m, HistogramPair, max_num_pairs + 1);
if (BROTLI_IS_OOM(m)) return;
}
clusters = BROTLI_ALLOC(m, uint32_t, num_clusters);
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < num_clusters; ++i) {
clusters[i] = (uint32_t)i;
}
num_final_clusters = FN(BrotliHistogramCombine)(
all_histograms, cluster_size, histogram_symbols, clusters, pairs,
num_clusters, num_blocks, BROTLI_MAX_NUMBER_OF_BLOCK_TYPES,
max_num_pairs);
BROTLI_FREE(m, pairs);
BROTLI_FREE(m, cluster_size);
new_index = BROTLI_ALLOC(m, uint32_t, num_clusters);
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < num_clusters; ++i) new_index[i] = kInvalidIndex;
pos = 0;
{
uint32_t next_index = 0;
for (i = 0; i < num_blocks; ++i) {
HistogramType histo;
size_t j;
uint32_t best_out;
double best_bits;
FN(HistogramClear)(&histo);
for (j = 0; j < block_lengths[i]; ++j) {
FN(HistogramAdd)(&histo, data[pos++]);
}
best_out = (i == 0) ? histogram_symbols[0] : histogram_symbols[i - 1];
best_bits =
FN(BrotliHistogramBitCostDistance)(&histo, &all_histograms[best_out]);
for (j = 0; j < num_final_clusters; ++j) {
const double cur_bits = FN(BrotliHistogramBitCostDistance)(
&histo, &all_histograms[clusters[j]]);
if (cur_bits < best_bits) {
best_bits = cur_bits;
best_out = clusters[j];
}
}
histogram_symbols[i] = best_out;
if (new_index[best_out] == kInvalidIndex) {
new_index[best_out] = next_index++;
}
}
}
BROTLI_FREE(m, clusters);
BROTLI_FREE(m, all_histograms);
BROTLI_ENSURE_CAPACITY(
m, uint8_t, split->types, split->types_alloc_size, num_blocks);
BROTLI_ENSURE_CAPACITY(
m, uint32_t, split->lengths, split->lengths_alloc_size, num_blocks);
if (BROTLI_IS_OOM(m)) return;
{
uint32_t cur_length = 0;
size_t block_idx = 0;
uint8_t max_type = 0;
for (i = 0; i < num_blocks; ++i) {
cur_length += block_lengths[i];
if (i + 1 == num_blocks ||
histogram_symbols[i] != histogram_symbols[i + 1]) {
const uint8_t id = (uint8_t)new_index[histogram_symbols[i]];
split->types[block_idx] = id;
split->lengths[block_idx] = cur_length;
max_type = BROTLI_MAX(uint8_t, max_type, id);
cur_length = 0;
++block_idx;
}
}
split->num_blocks = block_idx;
split->num_types = (size_t)max_type + 1;
}
BROTLI_FREE(m, new_index);
BROTLI_FREE(m, block_lengths);
BROTLI_FREE(m, histogram_symbols);
}
static void FN(SplitByteVector)(MemoryManager* m,
const DataType* data, const size_t length,
const size_t literals_per_histogram,
const size_t max_histograms,
const size_t sampling_stride_length,
const double block_switch_cost,
const BrotliEncoderParams* params,
BlockSplit* split) {
const size_t data_size = FN(HistogramDataSize)();
size_t num_histograms = length / literals_per_histogram + 1;
HistogramType* histograms;
if (num_histograms > max_histograms) {
num_histograms = max_histograms;
}
if (length == 0) {
split->num_types = 1;
return;
} else if (length < kMinLengthForBlockSplitting) {
BROTLI_ENSURE_CAPACITY(m, uint8_t,
split->types, split->types_alloc_size, split->num_blocks + 1);
BROTLI_ENSURE_CAPACITY(m, uint32_t,
split->lengths, split->lengths_alloc_size, split->num_blocks + 1);
if (BROTLI_IS_OOM(m)) return;
split->num_types = 1;
split->types[split->num_blocks] = 0;
split->lengths[split->num_blocks] = (uint32_t)length;
split->num_blocks++;
return;
}
histograms = BROTLI_ALLOC(m, HistogramType, num_histograms);
if (BROTLI_IS_OOM(m)) return;
/* Find good entropy codes. */
FN(InitialEntropyCodes)(data, length,
sampling_stride_length,
num_histograms, histograms);
FN(RefineEntropyCodes)(data, length,
sampling_stride_length,
num_histograms, histograms);
{
/* Find a good path through literals with the good entropy codes. */
uint8_t* block_ids = BROTLI_ALLOC(m, uint8_t, length);
size_t num_blocks = 0;
const size_t bitmaplen = (num_histograms + 7) >> 3;
double* insert_cost = BROTLI_ALLOC(m, double, data_size * num_histograms);
double* cost = BROTLI_ALLOC(m, double, num_histograms);
uint8_t* switch_signal = BROTLI_ALLOC(m, uint8_t, length * bitmaplen);
uint16_t* new_id = BROTLI_ALLOC(m, uint16_t, num_histograms);
const size_t iters = params->quality < HQ_ZOPFLIFICATION_QUALITY ? 3 : 10;
size_t i;
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < iters; ++i) {
num_blocks = FN(FindBlocks)(data, length,
block_switch_cost,
num_histograms, histograms,
insert_cost, cost, switch_signal,
block_ids);
num_histograms = FN(RemapBlockIds)(block_ids, length,
new_id, num_histograms);
FN(BuildBlockHistograms)(data, length, block_ids,
num_histograms, histograms);
}
BROTLI_FREE(m, insert_cost);
BROTLI_FREE(m, cost);
BROTLI_FREE(m, switch_signal);
BROTLI_FREE(m, new_id);
BROTLI_FREE(m, histograms);
FN(ClusterBlocks)(m, data, length, num_blocks, block_ids, split);
if (BROTLI_IS_OOM(m)) return;
BROTLI_FREE(m, block_ids);
}
}
#undef HistogramType

File diff suppressed because it is too large


@ -1,103 +0,0 @@
/* Copyright 2014 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions to convert brotli-related data structures into the
brotli bit stream. The functions here operate under
assumption that there is enough space in the storage, i.e., there are
no out-of-range checks anywhere.
These functions do bit addressing into a byte array. The byte array
is called "storage" and the index to the bit is called storage_ix
in function arguments. */
#ifndef BROTLI_ENC_BROTLI_BIT_STREAM_H_
#define BROTLI_ENC_BROTLI_BIT_STREAM_H_
#include "../common/platform.h"
#include <brotli/types.h>
#include "./command.h"
#include "./context.h"
#include "./entropy_encode.h"
#include "./memory.h"
#include "./metablock.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* All Store functions here will use a storage_ix, which is always the bit
position for the current storage. */
BROTLI_INTERNAL void BrotliStoreHuffmanTree(const uint8_t* depths, size_t num,
HuffmanTree* tree, size_t *storage_ix, uint8_t *storage);
BROTLI_INTERNAL void BrotliBuildAndStoreHuffmanTreeFast(
MemoryManager* m, const uint32_t* histogram, const size_t histogram_total,
const size_t max_bits, uint8_t* depth, uint16_t* bits, size_t* storage_ix,
uint8_t* storage);
/* REQUIRES: length > 0 */
/* REQUIRES: length <= (1 << 24) */
BROTLI_INTERNAL void BrotliStoreMetaBlock(MemoryManager* m,
const uint8_t* input,
size_t start_pos,
size_t length,
size_t mask,
uint8_t prev_byte,
uint8_t prev_byte2,
BROTLI_BOOL is_final_block,
uint32_t num_direct_distance_codes,
uint32_t distance_postfix_bits,
ContextType literal_context_mode,
const Command* commands,
size_t n_commands,
const MetaBlockSplit* mb,
size_t* storage_ix,
uint8_t* storage);
/* Stores the meta-block without doing any block splitting, just collects
one histogram per block category and uses that for entropy coding.
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
BROTLI_INTERNAL void BrotliStoreMetaBlockTrivial(MemoryManager* m,
const uint8_t* input,
size_t start_pos,
size_t length,
size_t mask,
BROTLI_BOOL is_last,
const Command *commands,
size_t n_commands,
size_t* storage_ix,
uint8_t* storage);
/* Same as above, but uses static prefix codes for histograms with only a few
symbols, and uses static code length prefix codes for all other histograms.
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
BROTLI_INTERNAL void BrotliStoreMetaBlockFast(MemoryManager* m,
const uint8_t* input,
size_t start_pos,
size_t length,
size_t mask,
BROTLI_BOOL is_last,
const Command *commands,
size_t n_commands,
size_t* storage_ix,
uint8_t* storage);
/* This is for storing uncompressed blocks (simple raw storage of
bytes-as-bytes).
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
BROTLI_INTERNAL void BrotliStoreUncompressedMetaBlock(
BROTLI_BOOL is_final_block, const uint8_t* input, size_t position,
size_t mask, size_t len, size_t* storage_ix, uint8_t* storage);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_BROTLI_BIT_STREAM_H_ */
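The storage_ix convention above is plain bit addressing into a byte buffer. A minimal sketch of what appending bits at storage_ix could look like; this is not the library's writer, and it assumes the buffer is zero-initialized and that bits are written least-significant first:
#include <stdint.h>
#include <stddef.h>

static void SketchWriteBits(size_t n_bits, uint64_t value,
                            size_t* storage_ix, uint8_t* storage) {
  size_t i;
  for (i = 0; i < n_bits; ++i) {
    size_t bit_pos = *storage_ix + i;                 /* absolute bit index */
    uint8_t bit = (uint8_t)((value >> i) & 1u);
    storage[bit_pos >> 3] |= (uint8_t)(bit << (bit_pos & 7u));
  }
  *storage_ix += n_bits;
}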


@ -1,56 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions for clustering similar histograms together. */
#include "./cluster.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./bit_cost.h" /* BrotliPopulationCost */
#include "./fast_log.h"
#include "./histogram.h"
#include "./memory.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static BROTLI_INLINE BROTLI_BOOL HistogramPairIsLess(
const HistogramPair* p1, const HistogramPair* p2) {
if (p1->cost_diff != p2->cost_diff) {
return TO_BROTLI_BOOL(p1->cost_diff > p2->cost_diff);
}
return TO_BROTLI_BOOL((p1->idx2 - p1->idx1) > (p2->idx2 - p2->idx1));
}
/* Returns entropy reduction of the context map when we combine two clusters. */
static BROTLI_INLINE double ClusterCostDiff(size_t size_a, size_t size_b) {
size_t size_c = size_a + size_b;
return (double)size_a * FastLog2(size_a) +
(double)size_b * FastLog2(size_b) -
(double)size_c * FastLog2(size_c);
}
#define CODE(X) X
#define FN(X) X ## Literal
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Command
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Distance
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#undef CODE
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
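The ClusterCostDiff formula above can be checked by hand: combining two clusters of two symbols each gives 2·log2(2) + 2·log2(2) − 4·log2(4) = 2 + 2 − 8 = −4, i.e. the context-map entropy term drops by four bits, before the 0.5 weighting applied by the caller. A standalone sketch using libm, valid for non-zero sizes only:
#include <math.h>
#include <stddef.h>

static double SketchClusterCostDiff(size_t size_a, size_t size_b) {
  size_t size_c = size_a + size_b;
  return (double)size_a * log2((double)size_a) +
         (double)size_b * log2((double)size_b) -
         (double)size_c * log2((double)size_c);
}
/* SketchClusterCostDiff(2, 2) == -4.0 */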


@ -1,48 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions for clustering similar histograms together. */
#ifndef BROTLI_ENC_CLUSTER_H_
#define BROTLI_ENC_CLUSTER_H_
#include "../common/platform.h"
#include <brotli/types.h>
#include "./histogram.h"
#include "./memory.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
typedef struct HistogramPair {
uint32_t idx1;
uint32_t idx2;
double cost_combo;
double cost_diff;
} HistogramPair;
#define CODE(X) /* Declaration */;
#define FN(X) X ## Literal
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Command
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Distance
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#undef CODE
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_CLUSTER_H_ */


@ -1,317 +0,0 @@
/* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, CODE */
#define HistogramType FN(Histogram)
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
BROTLI_INTERNAL void FN(BrotliCompareAndPushToQueue)(
const HistogramType* out, const uint32_t* cluster_size, uint32_t idx1,
uint32_t idx2, size_t max_num_pairs, HistogramPair* pairs,
size_t* num_pairs) CODE({
BROTLI_BOOL is_good_pair = BROTLI_FALSE;
HistogramPair p;
p.idx1 = p.idx2 = 0;
p.cost_diff = p.cost_combo = 0;
if (idx1 == idx2) {
return;
}
if (idx2 < idx1) {
uint32_t t = idx2;
idx2 = idx1;
idx1 = t;
}
p.idx1 = idx1;
p.idx2 = idx2;
p.cost_diff = 0.5 * ClusterCostDiff(cluster_size[idx1], cluster_size[idx2]);
p.cost_diff -= out[idx1].bit_cost_;
p.cost_diff -= out[idx2].bit_cost_;
if (out[idx1].total_count_ == 0) {
p.cost_combo = out[idx2].bit_cost_;
is_good_pair = BROTLI_TRUE;
} else if (out[idx2].total_count_ == 0) {
p.cost_combo = out[idx1].bit_cost_;
is_good_pair = BROTLI_TRUE;
} else {
double threshold = *num_pairs == 0 ? 1e99 :
BROTLI_MAX(double, 0.0, pairs[0].cost_diff);
HistogramType combo = out[idx1];
double cost_combo;
FN(HistogramAddHistogram)(&combo, &out[idx2]);
cost_combo = FN(BrotliPopulationCost)(&combo);
if (cost_combo < threshold - p.cost_diff) {
p.cost_combo = cost_combo;
is_good_pair = BROTLI_TRUE;
}
}
if (is_good_pair) {
p.cost_diff += p.cost_combo;
if (*num_pairs > 0 && HistogramPairIsLess(&pairs[0], &p)) {
/* Replace the top of the queue if needed. */
if (*num_pairs < max_num_pairs) {
pairs[*num_pairs] = pairs[0];
++(*num_pairs);
}
pairs[0] = p;
} else if (*num_pairs < max_num_pairs) {
pairs[*num_pairs] = p;
++(*num_pairs);
}
}
})
BROTLI_INTERNAL size_t FN(BrotliHistogramCombine)(HistogramType* out,
uint32_t* cluster_size,
uint32_t* symbols,
uint32_t* clusters,
HistogramPair* pairs,
size_t num_clusters,
size_t symbols_size,
size_t max_clusters,
size_t max_num_pairs) CODE({
double cost_diff_threshold = 0.0;
size_t min_cluster_size = 1;
size_t num_pairs = 0;
{
/* We maintain a vector of histogram pairs, with the property that the pair
with the maximum bit cost reduction is the first. */
size_t idx1;
for (idx1 = 0; idx1 < num_clusters; ++idx1) {
size_t idx2;
for (idx2 = idx1 + 1; idx2 < num_clusters; ++idx2) {
FN(BrotliCompareAndPushToQueue)(out, cluster_size, clusters[idx1],
clusters[idx2], max_num_pairs, &pairs[0], &num_pairs);
}
}
}
while (num_clusters > min_cluster_size) {
uint32_t best_idx1;
uint32_t best_idx2;
size_t i;
if (pairs[0].cost_diff >= cost_diff_threshold) {
cost_diff_threshold = 1e99;
min_cluster_size = max_clusters;
continue;
}
/* Take the best pair from the top of heap. */
best_idx1 = pairs[0].idx1;
best_idx2 = pairs[0].idx2;
FN(HistogramAddHistogram)(&out[best_idx1], &out[best_idx2]);
out[best_idx1].bit_cost_ = pairs[0].cost_combo;
cluster_size[best_idx1] += cluster_size[best_idx2];
for (i = 0; i < symbols_size; ++i) {
if (symbols[i] == best_idx2) {
symbols[i] = best_idx1;
}
}
for (i = 0; i < num_clusters; ++i) {
if (clusters[i] == best_idx2) {
memmove(&clusters[i], &clusters[i + 1],
(num_clusters - i - 1) * sizeof(clusters[0]));
break;
}
}
--num_clusters;
{
/* Remove pairs intersecting the just combined best pair. */
size_t copy_to_idx = 0;
for (i = 0; i < num_pairs; ++i) {
HistogramPair* p = &pairs[i];
if (p->idx1 == best_idx1 || p->idx2 == best_idx1 ||
p->idx1 == best_idx2 || p->idx2 == best_idx2) {
/* Remove invalid pair from the queue. */
continue;
}
if (HistogramPairIsLess(&pairs[0], p)) {
/* Replace the top of the queue if needed. */
HistogramPair front = pairs[0];
pairs[0] = *p;
pairs[copy_to_idx] = front;
} else {
pairs[copy_to_idx] = *p;
}
++copy_to_idx;
}
num_pairs = copy_to_idx;
}
/* Push new pairs formed with the combined histogram to the heap. */
for (i = 0; i < num_clusters; ++i) {
FN(BrotliCompareAndPushToQueue)(out, cluster_size, best_idx1, clusters[i],
max_num_pairs, &pairs[0], &num_pairs);
}
}
return num_clusters;
})
/* Computes the bit cost of moving a histogram from cur_symbol to candidate. */
BROTLI_INTERNAL double FN(BrotliHistogramBitCostDistance)(
const HistogramType* histogram, const HistogramType* candidate) CODE({
if (histogram->total_count_ == 0) {
return 0.0;
} else {
HistogramType tmp = *histogram;
FN(HistogramAddHistogram)(&tmp, candidate);
return FN(BrotliPopulationCost)(&tmp) - candidate->bit_cost_;
}
})
/* Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
BROTLI_INTERNAL void FN(BrotliHistogramRemap)(const HistogramType* in,
size_t in_size, const uint32_t* clusters, size_t num_clusters,
HistogramType* out, uint32_t* symbols) CODE({
size_t i;
for (i = 0; i < in_size; ++i) {
uint32_t best_out = i == 0 ? symbols[0] : symbols[i - 1];
double best_bits =
FN(BrotliHistogramBitCostDistance)(&in[i], &out[best_out]);
size_t j;
for (j = 0; j < num_clusters; ++j) {
const double cur_bits =
FN(BrotliHistogramBitCostDistance)(&in[i], &out[clusters[j]]);
if (cur_bits < best_bits) {
best_bits = cur_bits;
best_out = clusters[j];
}
}
symbols[i] = best_out;
}
/* Recompute each out based on raw and symbols. */
for (i = 0; i < num_clusters; ++i) {
FN(HistogramClear)(&out[clusters[i]]);
}
for (i = 0; i < in_size; ++i) {
FN(HistogramAddHistogram)(&out[symbols[i]], &in[i]);
}
})
/* Reorders elements of the out[0..length) array and changes values in
symbols[0..length) array in the following way:
* when called, symbols[] contains indexes into out[], and has N unique
values (possibly N < length)
* on return, symbols'[i] = f(symbols[i]) and
out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length,
where f is a bijection between the range of symbols[] and [0..N), and
the first occurrences of values in symbols'[i] come in consecutive
increasing order.
Returns N, the number of unique values in symbols[]. */
BROTLI_INTERNAL size_t FN(BrotliHistogramReindex)(MemoryManager* m,
HistogramType* out, uint32_t* symbols, size_t length) CODE({
static const uint32_t kInvalidIndex = BROTLI_UINT32_MAX;
uint32_t* new_index = BROTLI_ALLOC(m, uint32_t, length);
uint32_t next_index;
HistogramType* tmp;
size_t i;
if (BROTLI_IS_OOM(m)) return 0;
for (i = 0; i < length; ++i) {
new_index[i] = kInvalidIndex;
}
next_index = 0;
for (i = 0; i < length; ++i) {
if (new_index[symbols[i]] == kInvalidIndex) {
new_index[symbols[i]] = next_index;
++next_index;
}
}
/* TODO: by using the idea of "cycle sort" we could avoid the allocation of
tmp and reduce the amount of copying by a factor of 2. */
tmp = BROTLI_ALLOC(m, HistogramType, next_index);
if (BROTLI_IS_OOM(m)) return 0;
next_index = 0;
for (i = 0; i < length; ++i) {
if (new_index[symbols[i]] == next_index) {
tmp[next_index] = out[symbols[i]];
++next_index;
}
symbols[i] = new_index[symbols[i]];
}
BROTLI_FREE(m, new_index);
for (i = 0; i < next_index; ++i) {
out[i] = tmp[i];
}
BROTLI_FREE(m, tmp);
return next_index;
})
BROTLI_INTERNAL void FN(BrotliClusterHistograms)(
MemoryManager* m, const HistogramType* in, const size_t in_size,
size_t max_histograms, HistogramType* out, size_t* out_size,
uint32_t* histogram_symbols) CODE({
uint32_t* cluster_size = BROTLI_ALLOC(m, uint32_t, in_size);
uint32_t* clusters = BROTLI_ALLOC(m, uint32_t, in_size);
size_t num_clusters = 0;
const size_t max_input_histograms = 64;
size_t pairs_capacity = max_input_histograms * max_input_histograms / 2;
/* For the first pass of clustering, we allow all pairs. */
HistogramPair* pairs = BROTLI_ALLOC(m, HistogramPair, pairs_capacity + 1);
size_t i;
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < in_size; ++i) {
cluster_size[i] = 1;
}
for (i = 0; i < in_size; ++i) {
out[i] = in[i];
out[i].bit_cost_ = FN(BrotliPopulationCost)(&in[i]);
histogram_symbols[i] = (uint32_t)i;
}
for (i = 0; i < in_size; i += max_input_histograms) {
size_t num_to_combine =
BROTLI_MIN(size_t, in_size - i, max_input_histograms);
size_t num_new_clusters;
size_t j;
for (j = 0; j < num_to_combine; ++j) {
clusters[num_clusters + j] = (uint32_t)(i + j);
}
num_new_clusters =
FN(BrotliHistogramCombine)(out, cluster_size,
&histogram_symbols[i],
&clusters[num_clusters], pairs,
num_to_combine, num_to_combine,
max_histograms, pairs_capacity);
num_clusters += num_new_clusters;
}
{
/* For the second pass, we limit the total number of histogram pairs.
After this limit is reached, we only keep searching for the best pair. */
size_t max_num_pairs = BROTLI_MIN(size_t,
64 * num_clusters, (num_clusters / 2) * num_clusters);
BROTLI_ENSURE_CAPACITY(
m, HistogramPair, pairs, pairs_capacity, max_num_pairs + 1);
if (BROTLI_IS_OOM(m)) return;
/* Collapse similar histograms. */
num_clusters = FN(BrotliHistogramCombine)(out, cluster_size,
histogram_symbols, clusters,
pairs, num_clusters, in_size,
max_histograms, max_num_pairs);
}
BROTLI_FREE(m, pairs);
BROTLI_FREE(m, cluster_size);
/* Find the optimal map from original histograms to the final ones. */
FN(BrotliHistogramRemap)(in, in_size, clusters, num_clusters,
out, histogram_symbols);
BROTLI_FREE(m, clusters);
/* Convert the context map to a canonical form. */
*out_size = FN(BrotliHistogramReindex)(m, out, histogram_symbols, in_size);
if (BROTLI_IS_OOM(m)) return;
})
#undef HistogramType
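The reindexing contract of BrotliHistogramReindex above is easier to see on a concrete input: symbols {5, 2, 5, 7} become {0, 1, 0, 2}, with first occurrences numbered in order, and the result is 3. A standalone sketch of just the symbol remapping, with hypothetical names and symbol values assumed to be below 256:
#include <stdint.h>
#include <stddef.h>

static size_t SketchReindexSymbols(uint32_t* symbols, size_t length) {
  uint32_t new_id[256];  /* assumes symbol values < 256 */
  uint32_t next_index = 0;
  size_t i;
  for (i = 0; i < 256; ++i) new_id[i] = 0xFFFFFFFFu;
  for (i = 0; i < length; ++i) {
    if (new_id[symbols[i]] == 0xFFFFFFFFu) new_id[symbols[i]] = next_index++;
    symbols[i] = new_id[symbols[i]];
  }
  return next_index;  /* {5, 2, 5, 7} -> {0, 1, 0, 2}, returns 3 */
}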

View File

@ -1,180 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* This class models a sequence of literals and a backward reference copy. */
#ifndef BROTLI_ENC_COMMAND_H_
#define BROTLI_ENC_COMMAND_H_
#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./fast_log.h"
#include "./prefix.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static uint32_t kInsBase[] = { 0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50,
66, 98, 130, 194, 322, 578, 1090, 2114, 6210, 22594 };
static uint32_t kInsExtra[] = { 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
5, 5, 6, 7, 8, 9, 10, 12, 14, 24 };
static uint32_t kCopyBase[] = { 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 18, 22, 30,
38, 54, 70, 102, 134, 198, 326, 582, 1094, 2118 };
static uint32_t kCopyExtra[] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 7, 8, 9, 10, 24 };
static BROTLI_INLINE uint16_t GetInsertLengthCode(size_t insertlen) {
if (insertlen < 6) {
return (uint16_t)insertlen;
} else if (insertlen < 130) {
uint32_t nbits = Log2FloorNonZero(insertlen - 2) - 1u;
return (uint16_t)((nbits << 1) + ((insertlen - 2) >> nbits) + 2);
} else if (insertlen < 2114) {
return (uint16_t)(Log2FloorNonZero(insertlen - 66) + 10);
} else if (insertlen < 6210) {
return 21u;
} else if (insertlen < 22594) {
return 22u;
} else {
return 23u;
}
}
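/* For example, insertlen = 100 takes the "< 130" branch: nbits =
   Log2FloorNonZero(98) - 1 = 5 and the code is (5 << 1) + (98 >> 5) + 2 = 15,
   consistent with kInsBase[15] = 98 and kInsExtra[15] = 5, so 100 is coded as
   symbol 15 plus the 5-bit extra value 100 - 98 = 2. */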
static BROTLI_INLINE uint16_t GetCopyLengthCode(size_t copylen) {
if (copylen < 10) {
return (uint16_t)(copylen - 2);
} else if (copylen < 134) {
uint32_t nbits = Log2FloorNonZero(copylen - 6) - 1u;
return (uint16_t)((nbits << 1) + ((copylen - 6) >> nbits) + 4);
} else if (copylen < 2118) {
return (uint16_t)(Log2FloorNonZero(copylen - 70) + 12);
} else {
return 23u;
}
}
static BROTLI_INLINE uint16_t CombineLengthCodes(
uint16_t inscode, uint16_t copycode, BROTLI_BOOL use_last_distance) {
uint16_t bits64 =
(uint16_t)((copycode & 0x7u) | ((inscode & 0x7u) << 3));
if (use_last_distance && inscode < 8 && copycode < 16) {
return (copycode < 8) ? bits64 : (bits64 | 64);
} else {
/* Specification: 5 Encoding of ... (last table) */
/* offset = 2 * index, where index is in range [0..8] */
int offset = 2 * ((copycode >> 3) + 3 * (inscode >> 3));
/* All values in specification are K * 64,
where K = [2, 3, 6, 4, 5, 8, 7, 9, 10],
i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9],
K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D.
All values in D require only 2 bits to encode.
Magic constant is shifted 6 bits left, to avoid final multiplication. */
offset = (offset << 5) + 0x40 + ((0x520D40 >> offset) & 0xC0);
return (uint16_t)offset | bits64;
}
}
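/* For example, inscode = 3 and copycode = 17 give offset = 2 * ((17 >> 3) +
   3 * (3 >> 3)) = 4, and the expression above yields (4 << 5) + 0x40 +
   ((0x520D40 >> 4) & 0xC0) = 128 + 64 + 192 = 384 = 6 * 64, matching K = 6
   for the third entry of the K list in the comment above; the low bits
   (bits64 = 25) then select the cell within that group. */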
static BROTLI_INLINE void GetLengthCode(size_t insertlen, size_t copylen,
BROTLI_BOOL use_last_distance,
uint16_t* code) {
uint16_t inscode = GetInsertLengthCode(insertlen);
uint16_t copycode = GetCopyLengthCode(copylen);
*code = CombineLengthCodes(inscode, copycode, use_last_distance);
}
static BROTLI_INLINE uint32_t GetInsertBase(uint16_t inscode) {
return kInsBase[inscode];
}
static BROTLI_INLINE uint32_t GetInsertExtra(uint16_t inscode) {
return kInsExtra[inscode];
}
static BROTLI_INLINE uint32_t GetCopyBase(uint16_t copycode) {
return kCopyBase[copycode];
}
static BROTLI_INLINE uint32_t GetCopyExtra(uint16_t copycode) {
return kCopyExtra[copycode];
}
typedef struct Command {
uint32_t insert_len_;
/* Stores copy_len in the low 24 bits and the delta copy_code - copy_len in
   the high 8 bits. */
uint32_t copy_len_;
uint32_t dist_extra_;
uint16_t cmd_prefix_;
uint16_t dist_prefix_;
} Command;
/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. */
static BROTLI_INLINE void InitCommand(Command* self, size_t insertlen,
size_t copylen, int copylen_code_delta, size_t distance_code) {
/* Don't rely on signed int representation, use honest casts. */
uint32_t delta = (uint8_t)((int8_t)copylen_code_delta);
self->insert_len_ = (uint32_t)insertlen;
self->copy_len_ = (uint32_t)(copylen | (delta << 24));
/* The distance prefix and extra bits are stored in this Command as if
npostfix and ndirect were 0, they are only recomputed later after the
clustering if needed. */
PrefixEncodeCopyDistance(
distance_code, 0, 0, &self->dist_prefix_, &self->dist_extra_);
GetLengthCode(
insertlen, (size_t)((int)copylen + copylen_code_delta),
TO_BROTLI_BOOL(self->dist_prefix_ == 0), &self->cmd_prefix_);
}
static BROTLI_INLINE void InitInsertCommand(Command* self, size_t insertlen) {
self->insert_len_ = (uint32_t)insertlen;
self->copy_len_ = 4 << 24;
self->dist_extra_ = 0;
self->dist_prefix_ = BROTLI_NUM_DISTANCE_SHORT_CODES;
GetLengthCode(insertlen, 4, BROTLI_FALSE, &self->cmd_prefix_);
}
static BROTLI_INLINE uint32_t CommandRestoreDistanceCode(const Command* self) {
if (self->dist_prefix_ < BROTLI_NUM_DISTANCE_SHORT_CODES) {
return self->dist_prefix_;
} else {
uint32_t nbits = self->dist_extra_ >> 24;
uint32_t extra = self->dist_extra_ & 0xffffff;
/* It is assumed that the distance was first encoded with NPOSTFIX = 0 and
NDIRECT = 0, so the code itself is of this form:
BROTLI_NUM_DISTANCE_SHORT_CODES + 2 * (nbits - 1) + prefix_bit
Therefore, the following expression results in (2 + prefix_bit). */
uint32_t prefix =
self->dist_prefix_ + 4u - BROTLI_NUM_DISTANCE_SHORT_CODES - 2u * nbits;
/* Subtract 4 for offset (Chapter 4.) and
increase by BROTLI_NUM_DISTANCE_SHORT_CODES - 1 */
return (prefix << nbits) + extra + BROTLI_NUM_DISTANCE_SHORT_CODES - 4u;
}
}
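/* For example, a distance first encoded with nbits = 1, prefix_bit = 0 and
   extra = 0 has dist_prefix_ = BROTLI_NUM_DISTANCE_SHORT_CODES + 2 * 0 + 0 =
   16, so prefix = 16 + 4 - 16 - 2 = 2 and the function returns
   (2 << 1) + 0 + 16 - 4 = 16, the distance code for offset 1 (see the comment
   above InitCommand). */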
static BROTLI_INLINE uint32_t CommandDistanceContext(const Command* self) {
uint32_t r = self->cmd_prefix_ >> 6;
uint32_t c = self->cmd_prefix_ & 7;
if ((r == 0 || r == 2 || r == 4 || r == 7) && (c <= 2)) {
return c;
}
return 3;
}
static BROTLI_INLINE uint32_t CommandCopyLen(const Command* self) {
return self->copy_len_ & 0xFFFFFF;
}
static BROTLI_INLINE uint32_t CommandCopyLenCode(const Command* self) {
int32_t delta = (int8_t)((uint8_t)(self->copy_len_ >> 24));
return (uint32_t)((int32_t)(self->copy_len_ & 0xFFFFFF) + delta);
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_COMMAND_H_ */

View File

@ -1,790 +0,0 @@
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function for fast encoding of an input fragment, independently of the input
history. This function uses one-pass processing: when we find a backward
match, we immediately emit the corresponding command and literal codes to
the bit stream.
Adapted from the CompressFragment() function in
https://github.com/google/snappy/blob/master/snappy.cc */
#include "./compress_fragment.h"
#include <string.h> /* memcmp, memcpy, memset */
#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./brotli_bit_stream.h"
#include "./entropy_encode.h"
#include "./fast_log.h"
#include "./find_match_length.h"
#include "./memory.h"
#include "./write_bits.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define MAX_DISTANCE (long)BROTLI_MAX_BACKWARD_LIMIT(18)
/* kHashMul32 multiplier has these properties:
* The multiplier must be odd. Otherwise we may lose the highest bit.
* No long streaks of ones or zeros.
* There is no effort to ensure that it is a prime; being odd is enough
for this use.
* The number has been tuned heuristically against compression benchmarks. */
static const uint32_t kHashMul32 = 0x1e35a7bd;
static BROTLI_INLINE uint32_t Hash(const uint8_t* p, size_t shift) {
const uint64_t h = (BROTLI_UNALIGNED_LOAD64LE(p) << 24) * kHashMul32;
return (uint32_t)(h >> shift);
}
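/* Note that the << 24 drops the three highest bytes of the little-endian
   load, so only the five bytes p[0..4] influence the hash value -- exactly
   the five bytes that IsMatch() below compares. */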
static BROTLI_INLINE uint32_t HashBytesAtOffset(
uint64_t v, int offset, size_t shift) {
BROTLI_DCHECK(offset >= 0);
BROTLI_DCHECK(offset <= 3);
{
const uint64_t h = ((v >> (8 * offset)) << 24) * kHashMul32;
return (uint32_t)(h >> shift);
}
}
static BROTLI_INLINE BROTLI_BOOL IsMatch(const uint8_t* p1, const uint8_t* p2) {
return TO_BROTLI_BOOL(
BrotliUnalignedRead32(p1) == BrotliUnalignedRead32(p2) &&
p1[4] == p2[4]);
}
/* Builds a literal prefix code into "depths" and "bits" based on the statistics
of the "input" string and stores it into the bit stream.
Note that the prefix code here is built from the pre-LZ77 input, therefore
we can only approximate the statistics of the actual literal stream.
Moreover, for long inputs we build a histogram from a sample of the input
and thus have to assign a non-zero depth for each literal.
Returns the estimated compression ratio, in millibytes per character, for
encoding the given input with the generated code. */
static size_t BuildAndStoreLiteralPrefixCode(MemoryManager* m,
const uint8_t* input,
const size_t input_size,
uint8_t depths[256],
uint16_t bits[256],
size_t* storage_ix,
uint8_t* storage) {
uint32_t histogram[256] = { 0 };
size_t histogram_total;
size_t i;
if (input_size < (1 << 15)) {
for (i = 0; i < input_size; ++i) {
++histogram[input[i]];
}
histogram_total = input_size;
for (i = 0; i < 256; ++i) {
/* We weigh the first 11 samples with weight 3 to account for the
balancing effect of the LZ77 phase on the histogram. */
const uint32_t adjust = 2 * BROTLI_MIN(uint32_t, histogram[i], 11u);
histogram[i] += adjust;
histogram_total += adjust;
}
} else {
static const size_t kSampleRate = 29;
for (i = 0; i < input_size; i += kSampleRate) {
++histogram[input[i]];
}
histogram_total = (input_size + kSampleRate - 1) / kSampleRate;
for (i = 0; i < 256; ++i) {
/* We add 1 to each population count to avoid 0 bit depths (since this is
only a sample and we don't know if the symbol appears or not), and we
weigh the first 11 samples with weight 3 to account for the balancing
effect of the LZ77 phase on the histogram (more frequent symbols are
more likely to be in backward references instead as literals). */
const uint32_t adjust = 1 + 2 * BROTLI_MIN(uint32_t, histogram[i], 11u);
histogram[i] += adjust;
histogram_total += adjust;
}
}
BrotliBuildAndStoreHuffmanTreeFast(m, histogram, histogram_total,
/* max_bits = */ 8,
depths, bits, storage_ix, storage);
if (BROTLI_IS_OOM(m)) return 0;
{
size_t literal_ratio = 0;
for (i = 0; i < 256; ++i) {
if (histogram[i]) literal_ratio += histogram[i] * depths[i];
}
/* Estimated encoding ratio, millibytes per symbol. */
return (literal_ratio * 125) / histogram_total;
}
}
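/* For instance, an average depth of 4 bits per literal gives a return value
   of roughly 4 * 125 = 500, i.e. 0.5 bytes per input character; values close
   to 1000 mean the literal code saves almost nothing (compare MIN_RATIO
   below). */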
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
static void BuildAndStoreCommandPrefixCode(const uint32_t histogram[128],
uint8_t depth[128], uint16_t bits[128], size_t* storage_ix,
uint8_t* storage) {
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
HuffmanTree tree[129];
uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS] = { 0 };
uint16_t cmd_bits[64];
BrotliCreateHuffmanTree(histogram, 64, 15, tree, depth);
BrotliCreateHuffmanTree(&histogram[64], 64, 14, tree, &depth[64]);
/* We have to jump through a few hoops here in order to compute
the command bits because the symbols are in a different order than in
the full alphabet. This looks complicated, but having the symbols
in this order in the command bits saves a few branches in the Emit*
functions. */
memcpy(cmd_depth, depth, 24);
memcpy(cmd_depth + 24, depth + 40, 8);
memcpy(cmd_depth + 32, depth + 24, 8);
memcpy(cmd_depth + 40, depth + 48, 8);
memcpy(cmd_depth + 48, depth + 32, 8);
memcpy(cmd_depth + 56, depth + 56, 8);
BrotliConvertBitDepthsToSymbols(cmd_depth, 64, cmd_bits);
memcpy(bits, cmd_bits, 48);
memcpy(bits + 24, cmd_bits + 32, 16);
memcpy(bits + 32, cmd_bits + 48, 16);
memcpy(bits + 40, cmd_bits + 24, 16);
memcpy(bits + 48, cmd_bits + 40, 16);
memcpy(bits + 56, cmd_bits + 56, 16);
BrotliConvertBitDepthsToSymbols(&depth[64], 64, &bits[64]);
{
/* Create the bit length array for the full command alphabet. */
size_t i;
memset(cmd_depth, 0, 64); /* only 64 first values were used */
memcpy(cmd_depth, depth, 8);
memcpy(cmd_depth + 64, depth + 8, 8);
memcpy(cmd_depth + 128, depth + 16, 8);
memcpy(cmd_depth + 192, depth + 24, 8);
memcpy(cmd_depth + 384, depth + 32, 8);
for (i = 0; i < 8; ++i) {
cmd_depth[128 + 8 * i] = depth[40 + i];
cmd_depth[256 + 8 * i] = depth[48 + i];
cmd_depth[448 + 8 * i] = depth[56 + i];
}
BrotliStoreHuffmanTree(
cmd_depth, BROTLI_NUM_COMMAND_SYMBOLS, tree, storage_ix, storage);
}
BrotliStoreHuffmanTree(&depth[64], 64, tree, storage_ix, storage);
}
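/* Note that the memcpy sizes above are byte counts: "depth" and "cmd_depth"
   are uint8_t, so 8 and 24 mean 8 and 24 symbols, while "bits" and "cmd_bits"
   are uint16_t, so the 16- and 48-byte copies move 8 and 24 symbols. */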
/* REQUIRES: insertlen < 6210 */
static BROTLI_INLINE void EmitInsertLen(size_t insertlen,
const uint8_t depth[128],
const uint16_t bits[128],
uint32_t histo[128],
size_t* storage_ix,
uint8_t* storage) {
if (insertlen < 6) {
const size_t code = insertlen + 40;
BrotliWriteBits(depth[code], bits[code], storage_ix, storage);
++histo[code];
} else if (insertlen < 130) {
const size_t tail = insertlen - 2;
const uint32_t nbits = Log2FloorNonZero(tail) - 1u;
const size_t prefix = tail >> nbits;
const size_t inscode = (nbits << 1) + prefix + 42;
BrotliWriteBits(depth[inscode], bits[inscode], storage_ix, storage);
BrotliWriteBits(nbits, tail - (prefix << nbits), storage_ix, storage);
++histo[inscode];
} else if (insertlen < 2114) {
const size_t tail = insertlen - 66;
const uint32_t nbits = Log2FloorNonZero(tail);
const size_t code = nbits + 50;
BrotliWriteBits(depth[code], bits[code], storage_ix, storage);
BrotliWriteBits(nbits, tail - ((size_t)1 << nbits), storage_ix, storage);
++histo[code];
} else {
BrotliWriteBits(depth[61], bits[61], storage_ix, storage);
BrotliWriteBits(12, insertlen - 2114, storage_ix, storage);
++histo[21];
}
}
static BROTLI_INLINE void EmitLongInsertLen(size_t insertlen,
const uint8_t depth[128],
const uint16_t bits[128],
uint32_t histo[128],
size_t* storage_ix,
uint8_t* storage) {
if (insertlen < 22594) {
BrotliWriteBits(depth[62], bits[62], storage_ix, storage);
BrotliWriteBits(14, insertlen - 6210, storage_ix, storage);
++histo[22];
} else {
BrotliWriteBits(depth[63], bits[63], storage_ix, storage);
BrotliWriteBits(24, insertlen - 22594, storage_ix, storage);
++histo[23];
}
}
static BROTLI_INLINE void EmitCopyLen(size_t copylen,
const uint8_t depth[128],
const uint16_t bits[128],
uint32_t histo[128],
size_t* storage_ix,
uint8_t* storage) {
if (copylen < 10) {
BrotliWriteBits(
depth[copylen + 14], bits[copylen + 14], storage_ix, storage);
++histo[copylen + 14];
} else if (copylen < 134) {
const size_t tail = copylen - 6;
const uint32_t nbits = Log2FloorNonZero(tail) - 1u;
const size_t prefix = tail >> nbits;
const size_t code = (nbits << 1) + prefix + 20;
BrotliWriteBits(depth[code], bits[code], storage_ix, storage);
BrotliWriteBits(nbits, tail - (prefix << nbits), storage_ix, storage);
++histo[code];
} else if (copylen < 2118) {
const size_t tail = copylen - 70;
const uint32_t nbits = Log2FloorNonZero(tail);
const size_t code = nbits + 28;
BrotliWriteBits(depth[code], bits[code], storage_ix, storage);
BrotliWriteBits(nbits, tail - ((size_t)1 << nbits), storage_ix, storage);
++histo[code];
} else {
BrotliWriteBits(depth[39], bits[39], storage_ix, storage);
BrotliWriteBits(24, copylen - 2118, storage_ix, storage);
++histo[47];
}
}
static BROTLI_INLINE void EmitCopyLenLastDistance(size_t copylen,
const uint8_t depth[128],
const uint16_t bits[128],
uint32_t histo[128],
size_t* storage_ix,
uint8_t* storage) {
if (copylen < 12) {
BrotliWriteBits(depth[copylen - 4], bits[copylen - 4], storage_ix, storage);
++histo[copylen - 4];
} else if (copylen < 72) {
const size_t tail = copylen - 8;
const uint32_t nbits = Log2FloorNonZero(tail) - 1;
const size_t prefix = tail >> nbits;
const size_t code = (nbits << 1) + prefix + 4;
BrotliWriteBits(depth[code], bits[code], storage_ix, storage);
BrotliWriteBits(nbits, tail - (prefix << nbits), storage_ix, storage);
++histo[code];
} else if (copylen < 136) {
const size_t tail = copylen - 8;
const size_t code = (tail >> 5) + 30;
BrotliWriteBits(depth[code], bits[code], storage_ix, storage);
BrotliWriteBits(5, tail & 31, storage_ix, storage);
BrotliWriteBits(depth[64], bits[64], storage_ix, storage);
++histo[code];
++histo[64];
} else if (copylen < 2120) {
const size_t tail = copylen - 72;
const uint32_t nbits = Log2FloorNonZero(tail);
const size_t code = nbits + 28;
BrotliWriteBits(depth[code], bits[code], storage_ix, storage);
BrotliWriteBits(nbits, tail - ((size_t)1 << nbits), storage_ix, storage);
BrotliWriteBits(depth[64], bits[64], storage_ix, storage);
++histo[code];
++histo[64];
} else {
BrotliWriteBits(depth[39], bits[39], storage_ix, storage);
BrotliWriteBits(24, copylen - 2120, storage_ix, storage);
BrotliWriteBits(depth[64], bits[64], storage_ix, storage);
++histo[47];
++histo[64];
}
}
static BROTLI_INLINE void EmitDistance(size_t distance,
const uint8_t depth[128],
const uint16_t bits[128],
uint32_t histo[128],
size_t* storage_ix, uint8_t* storage) {
const size_t d = distance + 3;
const uint32_t nbits = Log2FloorNonZero(d) - 1u;
const size_t prefix = (d >> nbits) & 1;
const size_t offset = (2 + prefix) << nbits;
const size_t distcode = 2 * (nbits - 1) + prefix + 80;
BrotliWriteBits(depth[distcode], bits[distcode], storage_ix, storage);
BrotliWriteBits(nbits, d - offset, storage_ix, storage);
++histo[distcode];
}
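/* For example, distance = 13 gives d = 16, nbits = Log2FloorNonZero(16) - 1 =
   3 and prefix = (16 >> 3) & 1 = 0, so distcode = 2 * (3 - 1) + 0 + 80 = 84
   and the 3 extra bits carry d - offset = 16 - 16 = 0. */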
static BROTLI_INLINE void EmitLiterals(const uint8_t* input, const size_t len,
const uint8_t depth[256],
const uint16_t bits[256],
size_t* storage_ix, uint8_t* storage) {
size_t j;
for (j = 0; j < len; j++) {
const uint8_t lit = input[j];
BrotliWriteBits(depth[lit], bits[lit], storage_ix, storage);
}
}
/* REQUIRES: len <= 1 << 24. */
static void BrotliStoreMetaBlockHeader(
size_t len, BROTLI_BOOL is_uncompressed, size_t* storage_ix,
uint8_t* storage) {
size_t nibbles = 6;
/* ISLAST */
BrotliWriteBits(1, 0, storage_ix, storage);
if (len <= (1U << 16)) {
nibbles = 4;
} else if (len <= (1U << 20)) {
nibbles = 5;
}
BrotliWriteBits(2, nibbles - 4, storage_ix, storage);
BrotliWriteBits(nibbles * 4, len - 1, storage_ix, storage);
/* ISUNCOMPRESSED */
BrotliWriteBits(1, (uint64_t)is_uncompressed, storage_ix, storage);
}
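/* The MLEN field therefore starts 3 bits into the header (1 ISLAST bit plus
   2 MNIBBLES bits), which is why callers below remember *storage_ix + 3 as
   mlen_storage_ix and later patch 20 bits (5 nibbles) there when a meta-block
   is extended. For example, len = 100000 needs nibbles = 5 and the header
   costs 1 + 2 + 20 + 1 = 24 bits. */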
static void UpdateBits(size_t n_bits, uint32_t bits, size_t pos,
uint8_t *array) {
while (n_bits > 0) {
size_t byte_pos = pos >> 3;
size_t n_unchanged_bits = pos & 7;
size_t n_changed_bits = BROTLI_MIN(size_t, n_bits, 8 - n_unchanged_bits);
size_t total_bits = n_unchanged_bits + n_changed_bits;
uint32_t mask =
(~((1u << total_bits) - 1u)) | ((1u << n_unchanged_bits) - 1u);
uint32_t unchanged_bits = array[byte_pos] & mask;
uint32_t changed_bits = bits & ((1u << n_changed_bits) - 1u);
array[byte_pos] =
(uint8_t)((changed_bits << n_unchanged_bits) | unchanged_bits);
n_bits -= n_changed_bits;
bits >>= n_changed_bits;
pos += n_changed_bits;
}
}
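/* For example, with pos = 11 and n_bits = 20 the first iteration has
   byte_pos = 1, n_unchanged_bits = 3 and n_changed_bits = 5: the low 3 bits
   of storage byte 1 are kept and its top 5 bits are replaced with the low
   5 bits of "bits"; the remaining 15 bits are then written as one full byte
   and a final 7-bit partial byte. */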
static void RewindBitPosition(const size_t new_storage_ix,
size_t* storage_ix, uint8_t* storage) {
const size_t bitpos = new_storage_ix & 7;
const size_t mask = (1u << bitpos) - 1;
storage[new_storage_ix >> 3] &= (uint8_t)mask;
*storage_ix = new_storage_ix;
}
static BROTLI_BOOL ShouldMergeBlock(
const uint8_t* data, size_t len, const uint8_t* depths) {
size_t histo[256] = { 0 };
static const size_t kSampleRate = 43;
size_t i;
for (i = 0; i < len; i += kSampleRate) {
++histo[data[i]];
}
{
const size_t total = (len + kSampleRate - 1) / kSampleRate;
double r = (FastLog2(total) + 0.5) * (double)total + 200;
for (i = 0; i < 256; ++i) {
r -= (double)histo[i] * (depths[i] + FastLog2(histo[i]));
}
return TO_BROTLI_BOOL(r >= 0.0);
}
}
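/* In other words, the block is merged when the cost of coding the sampled
   literals with the existing depths, sum histo[i] * depths[i], does not
   exceed the sample's empirical entropy, total * log2(total) -
   sum histo[i] * log2(histo[i]), by more than half a bit per sample plus a
   200-bit constant. */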
/* Acceptable loss for uncompressible speedup is 2% */
#define MIN_RATIO 980
static BROTLI_INLINE BROTLI_BOOL ShouldUseUncompressedMode(
const uint8_t* metablock_start, const uint8_t* next_emit,
const size_t insertlen, const size_t literal_ratio) {
const size_t compressed = (size_t)(next_emit - metablock_start);
if (compressed * 50 > insertlen) {
return BROTLI_FALSE;
} else {
return TO_BROTLI_BOOL(literal_ratio > MIN_RATIO);
}
}
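/* Uncompressed mode is only considered when the part of the meta-block
   already emitted (next_emit - metablock_start) is at most 2% of the pending
   insert length (otherwise compressed * 50 > insertlen rejects it), and only
   if the literal code is nearly useless: literal_ratio is in millibytes per
   character, so MIN_RATIO = 980 means the coded literals would still cost
   more than 0.98 bytes each. */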
static void EmitUncompressedMetaBlock(const uint8_t* begin, const uint8_t* end,
const size_t storage_ix_start,
size_t* storage_ix, uint8_t* storage) {
const size_t len = (size_t)(end - begin);
RewindBitPosition(storage_ix_start, storage_ix, storage);
BrotliStoreMetaBlockHeader(len, 1, storage_ix, storage);
*storage_ix = (*storage_ix + 7u) & ~7u;
memcpy(&storage[*storage_ix >> 3], begin, len);
*storage_ix += len << 3;
storage[*storage_ix >> 3] = 0;
}
static uint32_t kCmdHistoSeed[128] = {
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0,
};
static BROTLI_INLINE void BrotliCompressFragmentFastImpl(
MemoryManager* m, const uint8_t* input, size_t input_size,
BROTLI_BOOL is_last, int* table, size_t table_bits, uint8_t cmd_depth[128],
uint16_t cmd_bits[128], size_t* cmd_code_numbits, uint8_t* cmd_code,
size_t* storage_ix, uint8_t* storage) {
uint32_t cmd_histo[128];
const uint8_t* ip_end;
/* "next_emit" is a pointer to the first byte that is not covered by a
previous copy. Bytes between "next_emit" and the start of the next copy or
the end of the input will be emitted as literal bytes. */
const uint8_t* next_emit = input;
/* Save the start of the first block for position and distance computations.
*/
const uint8_t* base_ip = input;
static const size_t kFirstBlockSize = 3 << 15;
static const size_t kMergeBlockSize = 1 << 16;
const size_t kInputMarginBytes = BROTLI_WINDOW_GAP;
const size_t kMinMatchLen = 5;
const uint8_t* metablock_start = input;
size_t block_size = BROTLI_MIN(size_t, input_size, kFirstBlockSize);
size_t total_block_size = block_size;
/* Save the bit position of the MLEN field of the meta-block header, so that
we can update it later if we decide to extend this meta-block. */
size_t mlen_storage_ix = *storage_ix + 3;
uint8_t lit_depth[256];
uint16_t lit_bits[256];
size_t literal_ratio;
const uint8_t* ip;
int last_distance;
const size_t shift = 64u - table_bits;
BrotliStoreMetaBlockHeader(block_size, 0, storage_ix, storage);
/* No block splits, no contexts. */
BrotliWriteBits(13, 0, storage_ix, storage);
literal_ratio = BuildAndStoreLiteralPrefixCode(
m, input, block_size, lit_depth, lit_bits, storage_ix, storage);
if (BROTLI_IS_OOM(m)) return;
{
/* Store the pre-compressed command and distance prefix codes. */
size_t i;
for (i = 0; i + 7 < *cmd_code_numbits; i += 8) {
BrotliWriteBits(8, cmd_code[i >> 3], storage_ix, storage);
}
}
BrotliWriteBits(*cmd_code_numbits & 7, cmd_code[*cmd_code_numbits >> 3],
storage_ix, storage);
emit_commands:
/* Initialize the command and distance histograms. We will gather
statistics of command and distance codes during the processing
of this block and use it to update the command and distance
prefix codes for the next block. */
memcpy(cmd_histo, kCmdHistoSeed, sizeof(kCmdHistoSeed));
/* "ip" is the input pointer. */
ip = input;
last_distance = -1;
ip_end = input + block_size;
if (BROTLI_PREDICT_TRUE(block_size >= kInputMarginBytes)) {
/* For the last block, we need to keep a 16-byte margin so that we can be
sure that all distances are at most window size - 16.
For all other blocks, we only need to keep a margin of 5 bytes so that
we don't go over the block size with a copy. */
const size_t len_limit = BROTLI_MIN(size_t, block_size - kMinMatchLen,
input_size - kInputMarginBytes);
const uint8_t* ip_limit = input + len_limit;
uint32_t next_hash;
for (next_hash = Hash(++ip, shift); ; ) {
/* Step 1: Scan forward in the input looking for a 5-byte-long match.
If we get close to exhausting the input then goto emit_remainder.
Heuristic match skipping: If 32 bytes are scanned with no matches
found, start looking only at every other byte. If 32 more bytes are
scanned, look at every third byte, etc.. When a match is found,
immediately go back to looking at every byte. This is a small loss
(~5% performance, ~0.1% density) for compressible data due to more
bookkeeping, but for non-compressible data (such as JPEG) it's a huge
win since the compressor quickly "realizes" the data is incompressible
and doesn't bother looking for matches everywhere.
The "skip" variable keeps track of how many bytes there are since the
last match; dividing it by 32 (i.e. right-shifting by five) gives the
number of bytes to move ahead for each iteration. */
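/* Concretely, skip starts at 32, so the first 32 probes advance one byte at
   a time, the next 32 advance two bytes, and so on. */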
uint32_t skip = 32;
const uint8_t* next_ip = ip;
const uint8_t* candidate;
BROTLI_DCHECK(next_emit < ip);
trawl:
do {
uint32_t hash = next_hash;
uint32_t bytes_between_hash_lookups = skip++ >> 5;
BROTLI_DCHECK(hash == Hash(next_ip, shift));
ip = next_ip;
next_ip = ip + bytes_between_hash_lookups;
if (BROTLI_PREDICT_FALSE(next_ip > ip_limit)) {
goto emit_remainder;
}
next_hash = Hash(next_ip, shift);
candidate = ip - last_distance;
if (IsMatch(ip, candidate)) {
if (BROTLI_PREDICT_TRUE(candidate < ip)) {
table[hash] = (int)(ip - base_ip);
break;
}
}
candidate = base_ip + table[hash];
BROTLI_DCHECK(candidate >= base_ip);
BROTLI_DCHECK(candidate < ip);
table[hash] = (int)(ip - base_ip);
} while (BROTLI_PREDICT_TRUE(!IsMatch(ip, candidate)));
/* Check copy distance. If candidate is not feasible, continue search.
Checking is done outside of hot loop to reduce overhead. */
if (ip - candidate > MAX_DISTANCE) goto trawl;
/* Step 2: Emit the found match together with the literal bytes from
"next_emit" to the bit stream, and then see if we can find a next match
immediately afterwards. Repeat until we find no match for the input
without emitting some literal bytes. */
{
/* We have a 5-byte match at ip, and we need to emit bytes in
[next_emit, ip). */
const uint8_t* base = ip;
size_t matched = 5 + FindMatchLengthWithLimit(
candidate + 5, ip + 5, (size_t)(ip_end - ip) - 5);
int distance = (int)(base - candidate); /* > 0 */
size_t insert = (size_t)(base - next_emit);
ip += matched;
BROTLI_DCHECK(0 == memcmp(base, candidate, matched));
if (BROTLI_PREDICT_TRUE(insert < 6210)) {
EmitInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
storage_ix, storage);
} else if (ShouldUseUncompressedMode(metablock_start, next_emit, insert,
literal_ratio)) {
EmitUncompressedMetaBlock(metablock_start, base, mlen_storage_ix - 3,
storage_ix, storage);
input_size -= (size_t)(base - input);
input = base;
next_emit = input;
goto next_block;
} else {
EmitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
storage_ix, storage);
}
EmitLiterals(next_emit, insert, lit_depth, lit_bits,
storage_ix, storage);
if (distance == last_distance) {
BrotliWriteBits(cmd_depth[64], cmd_bits[64], storage_ix, storage);
++cmd_histo[64];
} else {
EmitDistance((size_t)distance, cmd_depth, cmd_bits,
cmd_histo, storage_ix, storage);
last_distance = distance;
}
EmitCopyLenLastDistance(matched, cmd_depth, cmd_bits, cmd_histo,
storage_ix, storage);
next_emit = ip;
if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) {
goto emit_remainder;
}
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some positions
within the last copy. */
{
uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64LE(ip - 3);
uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
uint32_t cur_hash = HashBytesAtOffset(input_bytes, 3, shift);
table[prev_hash] = (int)(ip - base_ip - 3);
prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
table[prev_hash] = (int)(ip - base_ip - 2);
prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
table[prev_hash] = (int)(ip - base_ip - 1);
candidate = base_ip + table[cur_hash];
table[cur_hash] = (int)(ip - base_ip);
}
}
while (IsMatch(ip, candidate)) {
/* We have a 5-byte match at ip, and no need to emit any literal bytes
prior to ip. */
const uint8_t* base = ip;
size_t matched = 5 + FindMatchLengthWithLimit(
candidate + 5, ip + 5, (size_t)(ip_end - ip) - 5);
if (ip - candidate > MAX_DISTANCE) break;
ip += matched;
last_distance = (int)(base - candidate); /* > 0 */
BROTLI_DCHECK(0 == memcmp(base, candidate, matched));
EmitCopyLen(matched, cmd_depth, cmd_bits, cmd_histo,
storage_ix, storage);
EmitDistance((size_t)last_distance, cmd_depth, cmd_bits,
cmd_histo, storage_ix, storage);
next_emit = ip;
if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) {
goto emit_remainder;
}
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some positions
within the last copy. */
{
uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64LE(ip - 3);
uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
uint32_t cur_hash = HashBytesAtOffset(input_bytes, 3, shift);
table[prev_hash] = (int)(ip - base_ip - 3);
prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
table[prev_hash] = (int)(ip - base_ip - 2);
prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
table[prev_hash] = (int)(ip - base_ip - 1);
candidate = base_ip + table[cur_hash];
table[cur_hash] = (int)(ip - base_ip);
}
}
next_hash = Hash(++ip, shift);
}
}
emit_remainder:
BROTLI_DCHECK(next_emit <= ip_end);
input += block_size;
input_size -= block_size;
block_size = BROTLI_MIN(size_t, input_size, kMergeBlockSize);
/* Decide if we want to continue this meta-block instead of emitting the
last insert-only command. */
if (input_size > 0 &&
total_block_size + block_size <= (1 << 20) &&
ShouldMergeBlock(input, block_size, lit_depth)) {
BROTLI_DCHECK(total_block_size > (1 << 16));
/* Update the size of the current meta-block and continue emitting commands.
We can do this because the current size and the new size both have 5
nibbles. */
total_block_size += block_size;
UpdateBits(20, (uint32_t)(total_block_size - 1), mlen_storage_ix, storage);
goto emit_commands;
}
/* Emit the remaining bytes as literals. */
if (next_emit < ip_end) {
const size_t insert = (size_t)(ip_end - next_emit);
if (BROTLI_PREDICT_TRUE(insert < 6210)) {
EmitInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
storage_ix, storage);
EmitLiterals(next_emit, insert, lit_depth, lit_bits, storage_ix, storage);
} else if (ShouldUseUncompressedMode(metablock_start, next_emit, insert,
literal_ratio)) {
EmitUncompressedMetaBlock(metablock_start, ip_end, mlen_storage_ix - 3,
storage_ix, storage);
} else {
EmitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
storage_ix, storage);
EmitLiterals(next_emit, insert, lit_depth, lit_bits,
storage_ix, storage);
}
}
next_emit = ip_end;
next_block:
/* If we have more data, write a new meta-block header and prefix codes and
then continue emitting commands. */
if (input_size > 0) {
metablock_start = input;
block_size = BROTLI_MIN(size_t, input_size, kFirstBlockSize);
total_block_size = block_size;
/* Save the bit position of the MLEN field of the meta-block header, so that
we can update it later if we decide to extend this meta-block. */
mlen_storage_ix = *storage_ix + 3;
BrotliStoreMetaBlockHeader(block_size, 0, storage_ix, storage);
/* No block splits, no contexts. */
BrotliWriteBits(13, 0, storage_ix, storage);
literal_ratio = BuildAndStoreLiteralPrefixCode(
m, input, block_size, lit_depth, lit_bits, storage_ix, storage);
if (BROTLI_IS_OOM(m)) return;
BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depth, cmd_bits,
storage_ix, storage);
goto emit_commands;
}
if (!is_last) {
/* If this is not the last block, update the command and distance prefix
codes for the next block and store the compressed forms. */
cmd_code[0] = 0;
*cmd_code_numbits = 0;
BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depth, cmd_bits,
cmd_code_numbits, cmd_code);
}
}
#define FOR_TABLE_BITS_(X) X(9) X(11) X(13) X(15)
#define BAKE_METHOD_PARAM_(B) \
static BROTLI_NOINLINE void BrotliCompressFragmentFastImpl ## B( \
MemoryManager* m, const uint8_t* input, size_t input_size, \
BROTLI_BOOL is_last, int* table, uint8_t cmd_depth[128], \
uint16_t cmd_bits[128], size_t* cmd_code_numbits, uint8_t* cmd_code, \
size_t* storage_ix, uint8_t* storage) { \
BrotliCompressFragmentFastImpl(m, input, input_size, is_last, table, B, \
cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage); \
}
FOR_TABLE_BITS_(BAKE_METHOD_PARAM_)
#undef BAKE_METHOD_PARAM_
void BrotliCompressFragmentFast(
MemoryManager* m, const uint8_t* input, size_t input_size,
BROTLI_BOOL is_last, int* table, size_t table_size, uint8_t cmd_depth[128],
uint16_t cmd_bits[128], size_t* cmd_code_numbits, uint8_t* cmd_code,
size_t* storage_ix, uint8_t* storage) {
const size_t initial_storage_ix = *storage_ix;
const size_t table_bits = Log2FloorNonZero(table_size);
if (input_size == 0) {
BROTLI_DCHECK(is_last);
BrotliWriteBits(1, 1, storage_ix, storage); /* islast */
BrotliWriteBits(1, 1, storage_ix, storage); /* isempty */
*storage_ix = (*storage_ix + 7u) & ~7u;
return;
}
switch (table_bits) {
#define CASE_(B) \
case B: \
BrotliCompressFragmentFastImpl ## B( \
m, input, input_size, is_last, table, cmd_depth, cmd_bits, \
cmd_code_numbits, cmd_code, storage_ix, storage); \
break;
FOR_TABLE_BITS_(CASE_)
#undef CASE_
default: BROTLI_DCHECK(0); break;
}
/* If output is larger than single uncompressed block, rewrite it. */
if (*storage_ix - initial_storage_ix > 31 + (input_size << 3)) {
EmitUncompressedMetaBlock(input, input + input_size, initial_storage_ix,
storage_ix, storage);
}
if (is_last) {
BrotliWriteBits(1, 1, storage_ix, storage); /* islast */
BrotliWriteBits(1, 1, storage_ix, storage); /* isempty */
*storage_ix = (*storage_ix + 7u) & ~7u;
}
}
#undef FOR_TABLE_BITS_
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif

View File

@ -1,61 +0,0 @@
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function for fast encoding of an input fragment, independently of the input
history. This function uses one-pass processing: when we find a backward
match, we immediately emit the corresponding command and literal codes to
the bit stream. */
#ifndef BROTLI_ENC_COMPRESS_FRAGMENT_H_
#define BROTLI_ENC_COMPRESS_FRAGMENT_H_
#include "../common/platform.h"
#include <brotli/types.h>
#include "./memory.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* Compresses "input" string to the "*storage" buffer as one or more complete
meta-blocks, and updates the "*storage_ix" bit position.
If "is_last" is 1, emits an additional empty last meta-block.
"cmd_depth" and "cmd_bits" contain the command and distance prefix codes
(see comment in encode.h) used for the encoding of this input fragment.
If "is_last" is 0, they are updated to reflect the statistics
of this input fragment, to be used for the encoding of the next fragment.
"*cmd_code_numbits" is the number of bits of the compressed representation
of the command and distance prefix codes, and "cmd_code" is an array of
at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed
command and distance prefix codes. If "is_last" is 0, these are also
updated to represent the updated "cmd_depth" and "cmd_bits".
REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
BROTLI_INTERNAL void BrotliCompressFragmentFast(MemoryManager* m,
const uint8_t* input,
size_t input_size,
BROTLI_BOOL is_last,
int* table, size_t table_size,
uint8_t cmd_depth[128],
uint16_t cmd_bits[128],
size_t* cmd_code_numbits,
uint8_t* cmd_code,
size_t* storage_ix,
uint8_t* storage);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_COMPRESS_FRAGMENT_H_ */

View File

@ -1,611 +0,0 @@
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function for fast encoding of an input fragment, independently of the input
history. This function uses two-pass processing: in the first pass we save
the found backward matches and literal bytes into a buffer, and in the
second pass we emit them into the bit stream using prefix codes built based
on the actual command and literal byte histograms. */
#include "./compress_fragment_two_pass.h"
#include <string.h> /* memcmp, memcpy, memset */
#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./bit_cost.h"
#include "./brotli_bit_stream.h"
#include "./entropy_encode.h"
#include "./fast_log.h"
#include "./find_match_length.h"
#include "./memory.h"
#include "./write_bits.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define MAX_DISTANCE (long)BROTLI_MAX_BACKWARD_LIMIT(18)
/* kHashMul32 multiplier has these properties:
* The multiplier must be odd. Otherwise we may lose the highest bit.
* No long streaks of ones or zeros.
* There is no effort to ensure that it is a prime; being odd is enough
for this use.
* The number has been tuned heuristically against compression benchmarks. */
static const uint32_t kHashMul32 = 0x1e35a7bd;
static BROTLI_INLINE uint32_t Hash(const uint8_t* p, size_t shift) {
const uint64_t h = (BROTLI_UNALIGNED_LOAD64LE(p) << 16) * kHashMul32;
return (uint32_t)(h >> shift);
}
static BROTLI_INLINE uint32_t HashBytesAtOffset(
uint64_t v, int offset, size_t shift) {
BROTLI_DCHECK(offset >= 0);
BROTLI_DCHECK(offset <= 2);
{
const uint64_t h = ((v >> (8 * offset)) << 16) * kHashMul32;
return (uint32_t)(h >> shift);
}
}
static BROTLI_INLINE BROTLI_BOOL IsMatch(const uint8_t* p1, const uint8_t* p2) {
return TO_BROTLI_BOOL(
BrotliUnalignedRead32(p1) == BrotliUnalignedRead32(p2) &&
p1[4] == p2[4] &&
p1[5] == p2[5]);
}
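/* As in compress_fragment.c, the << 16 in Hash() above drops the two highest
   bytes of the little-endian load, so exactly the six bytes p[0..5] feed the
   hash -- the same six bytes IsMatch() compares. */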
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
static void BuildAndStoreCommandPrefixCode(
const uint32_t histogram[128],
uint8_t depth[128], uint16_t bits[128],
size_t* storage_ix, uint8_t* storage) {
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
HuffmanTree tree[129];
uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS] = { 0 };
uint16_t cmd_bits[64];
BrotliCreateHuffmanTree(histogram, 64, 15, tree, depth);
BrotliCreateHuffmanTree(&histogram[64], 64, 14, tree, &depth[64]);
/* We have to jump through a few hoops here in order to compute
the command bits because the symbols are in a different order than in
the full alphabet. This looks complicated, but having the symbols
in this order in the command bits saves a few branches in the Emit*
functions. */
memcpy(cmd_depth, depth + 24, 24);
memcpy(cmd_depth + 24, depth, 8);
memcpy(cmd_depth + 32, depth + 48, 8);
memcpy(cmd_depth + 40, depth + 8, 8);
memcpy(cmd_depth + 48, depth + 56, 8);
memcpy(cmd_depth + 56, depth + 16, 8);
BrotliConvertBitDepthsToSymbols(cmd_depth, 64, cmd_bits);
memcpy(bits, cmd_bits + 24, 16);
memcpy(bits + 8, cmd_bits + 40, 16);
memcpy(bits + 16, cmd_bits + 56, 16);
memcpy(bits + 24, cmd_bits, 48);
memcpy(bits + 48, cmd_bits + 32, 16);
memcpy(bits + 56, cmd_bits + 48, 16);
BrotliConvertBitDepthsToSymbols(&depth[64], 64, &bits[64]);
{
/* Create the bit length array for the full command alphabet. */
size_t i;
memset(cmd_depth, 0, 64); /* only 64 first values were used */
memcpy(cmd_depth, depth + 24, 8);
memcpy(cmd_depth + 64, depth + 32, 8);
memcpy(cmd_depth + 128, depth + 40, 8);
memcpy(cmd_depth + 192, depth + 48, 8);
memcpy(cmd_depth + 384, depth + 56, 8);
for (i = 0; i < 8; ++i) {
cmd_depth[128 + 8 * i] = depth[i];
cmd_depth[256 + 8 * i] = depth[8 + i];
cmd_depth[448 + 8 * i] = depth[16 + i];
}
BrotliStoreHuffmanTree(
cmd_depth, BROTLI_NUM_COMMAND_SYMBOLS, tree, storage_ix, storage);
}
BrotliStoreHuffmanTree(&depth[64], 64, tree, storage_ix, storage);
}
static BROTLI_INLINE void EmitInsertLen(
uint32_t insertlen, uint32_t** commands) {
if (insertlen < 6) {
**commands = insertlen;
} else if (insertlen < 130) {
const uint32_t tail = insertlen - 2;
const uint32_t nbits = Log2FloorNonZero(tail) - 1u;
const uint32_t prefix = tail >> nbits;
const uint32_t inscode = (nbits << 1) + prefix + 2;
const uint32_t extra = tail - (prefix << nbits);
**commands = inscode | (extra << 8);
} else if (insertlen < 2114) {
const uint32_t tail = insertlen - 66;
const uint32_t nbits = Log2FloorNonZero(tail);
const uint32_t code = nbits + 10;
const uint32_t extra = tail - (1u << nbits);
**commands = code | (extra << 8);
} else if (insertlen < 6210) {
const uint32_t extra = insertlen - 2114;
**commands = 21 | (extra << 8);
} else if (insertlen < 22594) {
const uint32_t extra = insertlen - 6210;
**commands = 22 | (extra << 8);
} else {
const uint32_t extra = insertlen - 22594;
**commands = 23 | (extra << 8);
}
++(*commands);
}
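/* For example, insertlen = 100 gives inscode = 15 and extra = 98 - 96 = 2,
   so the stored command word is 15 | (2 << 8) = 527; StoreCommands() below
   then finds kNumExtraBits[15] = 5 and, since the code is < 24, copies
   kInsertOffset[15] + 2 = 100 literals after it. */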
static BROTLI_INLINE void EmitCopyLen(size_t copylen, uint32_t** commands) {
if (copylen < 10) {
**commands = (uint32_t)(copylen + 38);
} else if (copylen < 134) {
const size_t tail = copylen - 6;
const size_t nbits = Log2FloorNonZero(tail) - 1;
const size_t prefix = tail >> nbits;
const size_t code = (nbits << 1) + prefix + 44;
const size_t extra = tail - (prefix << nbits);
**commands = (uint32_t)(code | (extra << 8));
} else if (copylen < 2118) {
const size_t tail = copylen - 70;
const size_t nbits = Log2FloorNonZero(tail);
const size_t code = nbits + 52;
const size_t extra = tail - ((size_t)1 << nbits);
**commands = (uint32_t)(code | (extra << 8));
} else {
const size_t extra = copylen - 2118;
**commands = (uint32_t)(63 | (extra << 8));
}
++(*commands);
}
static BROTLI_INLINE void EmitCopyLenLastDistance(
size_t copylen, uint32_t** commands) {
if (copylen < 12) {
**commands = (uint32_t)(copylen + 20);
++(*commands);
} else if (copylen < 72) {
const size_t tail = copylen - 8;
const size_t nbits = Log2FloorNonZero(tail) - 1;
const size_t prefix = tail >> nbits;
const size_t code = (nbits << 1) + prefix + 28;
const size_t extra = tail - (prefix << nbits);
**commands = (uint32_t)(code | (extra << 8));
++(*commands);
} else if (copylen < 136) {
const size_t tail = copylen - 8;
const size_t code = (tail >> 5) + 54;
const size_t extra = tail & 31;
**commands = (uint32_t)(code | (extra << 8));
++(*commands);
**commands = 64;
++(*commands);
} else if (copylen < 2120) {
const size_t tail = copylen - 72;
const size_t nbits = Log2FloorNonZero(tail);
const size_t code = nbits + 52;
const size_t extra = tail - ((size_t)1 << nbits);
**commands = (uint32_t)(code | (extra << 8));
++(*commands);
**commands = 64;
++(*commands);
} else {
const size_t extra = copylen - 2120;
**commands = (uint32_t)(63 | (extra << 8));
++(*commands);
**commands = 64;
++(*commands);
}
}
static BROTLI_INLINE void EmitDistance(uint32_t distance, uint32_t** commands) {
uint32_t d = distance + 3;
uint32_t nbits = Log2FloorNonZero(d) - 1;
const uint32_t prefix = (d >> nbits) & 1;
const uint32_t offset = (2 + prefix) << nbits;
const uint32_t distcode = 2 * (nbits - 1) + prefix + 80;
uint32_t extra = d - offset;
**commands = distcode | (extra << 8);
++(*commands);
}
/* REQUIRES: len <= 1 << 24. */
static void BrotliStoreMetaBlockHeader(
size_t len, BROTLI_BOOL is_uncompressed, size_t* storage_ix,
uint8_t* storage) {
size_t nibbles = 6;
/* ISLAST */
BrotliWriteBits(1, 0, storage_ix, storage);
if (len <= (1U << 16)) {
nibbles = 4;
} else if (len <= (1U << 20)) {
nibbles = 5;
}
BrotliWriteBits(2, nibbles - 4, storage_ix, storage);
BrotliWriteBits(nibbles * 4, len - 1, storage_ix, storage);
/* ISUNCOMPRESSED */
BrotliWriteBits(1, (uint64_t)is_uncompressed, storage_ix, storage);
}
static BROTLI_INLINE void CreateCommands(const uint8_t* input,
size_t block_size, size_t input_size, const uint8_t* base_ip, int* table,
size_t table_bits, uint8_t** literals, uint32_t** commands) {
/* "ip" is the input pointer. */
const uint8_t* ip = input;
const size_t shift = 64u - table_bits;
const uint8_t* ip_end = input + block_size;
/* "next_emit" is a pointer to the first byte that is not covered by a
previous copy. Bytes between "next_emit" and the start of the next copy or
the end of the input will be emitted as literal bytes. */
const uint8_t* next_emit = input;
int last_distance = -1;
const size_t kInputMarginBytes = BROTLI_WINDOW_GAP;
const size_t kMinMatchLen = 6;
if (BROTLI_PREDICT_TRUE(block_size >= kInputMarginBytes)) {
/* For the last block, we need to keep a 16-byte margin so that we can be
sure that all distances are at most window size - 16.
For all other blocks, we only need to keep a margin of 5 bytes so that
we don't go over the block size with a copy. */
const size_t len_limit = BROTLI_MIN(size_t, block_size - kMinMatchLen,
input_size - kInputMarginBytes);
const uint8_t* ip_limit = input + len_limit;
uint32_t next_hash;
for (next_hash = Hash(++ip, shift); ; ) {
/* Step 1: Scan forward in the input looking for a 6-byte-long match.
If we get close to exhausting the input then goto emit_remainder.
Heuristic match skipping: If 32 bytes are scanned with no matches
found, start looking only at every other byte. If 32 more bytes are
scanned, look at every third byte, etc.. When a match is found,
immediately go back to looking at every byte. This is a small loss
(~5% performance, ~0.1% density) for compressible data due to more
bookkeeping, but for non-compressible data (such as JPEG) it's a huge
win since the compressor quickly "realizes" the data is incompressible
and doesn't bother looking for matches everywhere.
The "skip" variable keeps track of how many bytes there are since the
last match; dividing it by 32 (i.e. right-shifting by five) gives the
number of bytes to move ahead for each iteration. */
uint32_t skip = 32;
const uint8_t* next_ip = ip;
const uint8_t* candidate;
BROTLI_DCHECK(next_emit < ip);
trawl:
do {
uint32_t hash = next_hash;
uint32_t bytes_between_hash_lookups = skip++ >> 5;
ip = next_ip;
BROTLI_DCHECK(hash == Hash(ip, shift));
next_ip = ip + bytes_between_hash_lookups;
if (BROTLI_PREDICT_FALSE(next_ip > ip_limit)) {
goto emit_remainder;
}
next_hash = Hash(next_ip, shift);
candidate = ip - last_distance;
if (IsMatch(ip, candidate)) {
if (BROTLI_PREDICT_TRUE(candidate < ip)) {
table[hash] = (int)(ip - base_ip);
break;
}
}
candidate = base_ip + table[hash];
BROTLI_DCHECK(candidate >= base_ip);
BROTLI_DCHECK(candidate < ip);
table[hash] = (int)(ip - base_ip);
} while (BROTLI_PREDICT_TRUE(!IsMatch(ip, candidate)));
/* Check copy distance. If candidate is not feasible, continue search.
Checking is done outside of hot loop to reduce overhead. */
if (ip - candidate > MAX_DISTANCE) goto trawl;
/* Step 2: Emit the found match together with the literal bytes from
"next_emit", and then see if we can find a next match immediately
afterwards. Repeat until we find no match for the input
without emitting some literal bytes. */
{
/* We have a 6-byte match at ip, and we need to emit bytes in
[next_emit, ip). */
const uint8_t* base = ip;
size_t matched = 6 + FindMatchLengthWithLimit(
candidate + 6, ip + 6, (size_t)(ip_end - ip) - 6);
int distance = (int)(base - candidate); /* > 0 */
int insert = (int)(base - next_emit);
ip += matched;
BROTLI_DCHECK(0 == memcmp(base, candidate, matched));
EmitInsertLen((uint32_t)insert, commands);
memcpy(*literals, next_emit, (size_t)insert);
*literals += insert;
if (distance == last_distance) {
**commands = 64;
++(*commands);
} else {
EmitDistance((uint32_t)distance, commands);
last_distance = distance;
}
EmitCopyLenLastDistance(matched, commands);
next_emit = ip;
if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) {
goto emit_remainder;
}
{
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some
positions within the last copy. */
uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64LE(ip - 5);
uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
uint32_t cur_hash;
table[prev_hash] = (int)(ip - base_ip - 5);
prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
table[prev_hash] = (int)(ip - base_ip - 4);
prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
table[prev_hash] = (int)(ip - base_ip - 3);
input_bytes = BROTLI_UNALIGNED_LOAD64LE(ip - 2);
cur_hash = HashBytesAtOffset(input_bytes, 2, shift);
prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
table[prev_hash] = (int)(ip - base_ip - 2);
prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
table[prev_hash] = (int)(ip - base_ip - 1);
candidate = base_ip + table[cur_hash];
table[cur_hash] = (int)(ip - base_ip);
}
}
while (ip - candidate <= MAX_DISTANCE && IsMatch(ip, candidate)) {
/* We have a 6-byte match at ip, and no need to emit any
literal bytes prior to ip. */
const uint8_t* base = ip;
size_t matched = 6 + FindMatchLengthWithLimit(
candidate + 6, ip + 6, (size_t)(ip_end - ip) - 6);
ip += matched;
last_distance = (int)(base - candidate); /* > 0 */
BROTLI_DCHECK(0 == memcmp(base, candidate, matched));
EmitCopyLen(matched, commands);
EmitDistance((uint32_t)last_distance, commands);
next_emit = ip;
if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) {
goto emit_remainder;
}
{
/* We could immediately start working at ip now, but to improve
compression we first update "table" with the hashes of some
positions within the last copy. */
uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64LE(ip - 5);
uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
uint32_t cur_hash;
table[prev_hash] = (int)(ip - base_ip - 5);
prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
table[prev_hash] = (int)(ip - base_ip - 4);
prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
table[prev_hash] = (int)(ip - base_ip - 3);
input_bytes = BROTLI_UNALIGNED_LOAD64LE(ip - 2);
cur_hash = HashBytesAtOffset(input_bytes, 2, shift);
prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
table[prev_hash] = (int)(ip - base_ip - 2);
prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
table[prev_hash] = (int)(ip - base_ip - 1);
candidate = base_ip + table[cur_hash];
table[cur_hash] = (int)(ip - base_ip);
}
}
next_hash = Hash(++ip, shift);
}
}
emit_remainder:
BROTLI_DCHECK(next_emit <= ip_end);
/* Emit the remaining bytes as literals. */
if (next_emit < ip_end) {
const uint32_t insert = (uint32_t)(ip_end - next_emit);
EmitInsertLen(insert, commands);
memcpy(*literals, next_emit, insert);
*literals += insert;
}
}
static void StoreCommands(MemoryManager* m,
const uint8_t* literals, const size_t num_literals,
const uint32_t* commands, const size_t num_commands,
size_t* storage_ix, uint8_t* storage) {
static const uint32_t kNumExtraBits[128] = {
0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
};
static const uint32_t kInsertOffset[24] = {
0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578,
1090, 2114, 6210, 22594,
};
uint8_t lit_depths[256];
uint16_t lit_bits[256];
uint32_t lit_histo[256] = { 0 };
uint8_t cmd_depths[128] = { 0 };
uint16_t cmd_bits[128] = { 0 };
uint32_t cmd_histo[128] = { 0 };
size_t i;
for (i = 0; i < num_literals; ++i) {
++lit_histo[literals[i]];
}
BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo, num_literals,
/* max_bits = */ 8,
lit_depths, lit_bits,
storage_ix, storage);
if (BROTLI_IS_OOM(m)) return;
for (i = 0; i < num_commands; ++i) {
const uint32_t code = commands[i] & 0xFF;
BROTLI_DCHECK(code < 128);
++cmd_histo[code];
}
cmd_histo[1] += 1;
cmd_histo[2] += 1;
cmd_histo[64] += 1;
cmd_histo[84] += 1;
BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depths, cmd_bits,
storage_ix, storage);
for (i = 0; i < num_commands; ++i) {
const uint32_t cmd = commands[i];
const uint32_t code = cmd & 0xFF;
const uint32_t extra = cmd >> 8;
BROTLI_DCHECK(code < 128);
BrotliWriteBits(cmd_depths[code], cmd_bits[code], storage_ix, storage);
BrotliWriteBits(kNumExtraBits[code], extra, storage_ix, storage);
if (code < 24) {
const uint32_t insert = kInsertOffset[code] + extra;
uint32_t j;
for (j = 0; j < insert; ++j) {
const uint8_t lit = *literals;
BrotliWriteBits(lit_depths[lit], lit_bits[lit], storage_ix, storage);
++literals;
}
}
}
}
/* Acceptable loss for uncompressible speedup is 2% */
#define MIN_RATIO 0.98
#define SAMPLE_RATE 43
static BROTLI_BOOL ShouldCompress(
const uint8_t* input, size_t input_size, size_t num_literals) {
double corpus_size = (double)input_size;
if (num_literals < MIN_RATIO * corpus_size) {
return BROTLI_TRUE;
} else {
uint32_t literal_histo[256] = { 0 };
const double max_total_bit_cost = corpus_size * 8 * MIN_RATIO / SAMPLE_RATE;
size_t i;
for (i = 0; i < input_size; i += SAMPLE_RATE) {
++literal_histo[input[i]];
}
return TO_BROTLI_BOOL(BitsEntropy(literal_histo, 256) < max_total_bit_cost);
}
}
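/* That is: accept compression outright when literals make up less than 98%
   of the input; otherwise sample every 43rd byte and compress only if the
   sampled literals need fewer than 0.98 * 8 = 7.84 bits each at their own
   entropy, i.e. roughly a 2% expected saving. */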
static void RewindBitPosition(const size_t new_storage_ix,
size_t* storage_ix, uint8_t* storage) {
const size_t bitpos = new_storage_ix & 7;
const size_t mask = (1u << bitpos) - 1;
storage[new_storage_ix >> 3] &= (uint8_t)mask;
*storage_ix = new_storage_ix;
}
static void EmitUncompressedMetaBlock(const uint8_t* input, size_t input_size,
size_t* storage_ix, uint8_t* storage) {
BrotliStoreMetaBlockHeader(input_size, 1, storage_ix, storage);
*storage_ix = (*storage_ix + 7u) & ~7u;
memcpy(&storage[*storage_ix >> 3], input, input_size);
*storage_ix += input_size << 3;
storage[*storage_ix >> 3] = 0;
}
static BROTLI_INLINE void BrotliCompressFragmentTwoPassImpl(
MemoryManager* m, const uint8_t* input, size_t input_size,
BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf,
int* table, size_t table_bits, size_t* storage_ix, uint8_t* storage) {
/* Save the start of the first block for position and distance computations.
*/
const uint8_t* base_ip = input;
BROTLI_UNUSED(is_last);
while (input_size > 0) {
size_t block_size =
BROTLI_MIN(size_t, input_size, kCompressFragmentTwoPassBlockSize);
uint32_t* commands = command_buf;
uint8_t* literals = literal_buf;
size_t num_literals;
CreateCommands(input, block_size, input_size, base_ip, table, table_bits,
&literals, &commands);
num_literals = (size_t)(literals - literal_buf);
if (ShouldCompress(input, block_size, num_literals)) {
const size_t num_commands = (size_t)(commands - command_buf);
BrotliStoreMetaBlockHeader(block_size, 0, storage_ix, storage);
/* No block splits, no contexts. */
BrotliWriteBits(13, 0, storage_ix, storage);
StoreCommands(m, literal_buf, num_literals, command_buf, num_commands,
storage_ix, storage);
if (BROTLI_IS_OOM(m)) return;
} else {
/* Since we did not find many backward references and the entropy of
the data is close to 8 bits, we can simply emit an uncompressed block.
This makes compression speed of uncompressible data about 3x faster. */
EmitUncompressedMetaBlock(input, block_size, storage_ix, storage);
}
input += block_size;
input_size -= block_size;
}
}
#define FOR_TABLE_BITS_(X) \
X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17)
#define BAKE_METHOD_PARAM_(B) \
static BROTLI_NOINLINE void BrotliCompressFragmentTwoPassImpl ## B( \
MemoryManager* m, const uint8_t* input, size_t input_size, \
BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, \
int* table, size_t* storage_ix, uint8_t* storage) { \
BrotliCompressFragmentTwoPassImpl(m, input, input_size, is_last, command_buf,\
literal_buf, table, B, storage_ix, storage); \
}
FOR_TABLE_BITS_(BAKE_METHOD_PARAM_)
#undef BAKE_METHOD_PARAM_
void BrotliCompressFragmentTwoPass(
MemoryManager* m, const uint8_t* input, size_t input_size,
BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf,
int* table, size_t table_size, size_t* storage_ix, uint8_t* storage) {
const size_t initial_storage_ix = *storage_ix;
const size_t table_bits = Log2FloorNonZero(table_size);
switch (table_bits) {
#define CASE_(B) \
case B: \
BrotliCompressFragmentTwoPassImpl ## B( \
m, input, input_size, is_last, command_buf, \
literal_buf, table, storage_ix, storage); \
break;
FOR_TABLE_BITS_(CASE_)
#undef CASE_
default: BROTLI_DCHECK(0); break;
}
/* If output is larger than single uncompressed block, rewrite it. */
if (*storage_ix - initial_storage_ix > 31 + (input_size << 3)) {
RewindBitPosition(initial_storage_ix, storage_ix, storage);
EmitUncompressedMetaBlock(input, input_size, storage_ix, storage);
}
if (is_last) {
BrotliWriteBits(1, 1, storage_ix, storage); /* islast */
BrotliWriteBits(1, 1, storage_ix, storage); /* isempty */
*storage_ix = (*storage_ix + 7u) & ~7u;
}
}
#undef FOR_TABLE_BITS_
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif

View File

@ -1,54 +0,0 @@
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function for fast encoding of an input fragment, independently of the input
history. This function uses two-pass processing: in the first pass we save
the found backward matches and literal bytes into a buffer, and in the
second pass we emit them into the bit stream using prefix codes built based
on the actual command and literal byte histograms. */
#ifndef BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_
#define BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_
#include "../common/platform.h"
#include <brotli/types.h>
#include "./memory.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static const size_t kCompressFragmentTwoPassBlockSize = 1 << 17;
/* Compresses "input" string to the "*storage" buffer as one or more complete
meta-blocks, and updates the "*storage_ix" bit position.
If "is_last" is 1, emits an additional empty last meta-block.
REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
REQUIRES: "command_buf" and "literal_buf" point to at least
kCompressFragmentTwoPassBlockSize long arrays.
REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
REQUIRES: "table_size" is a power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
BROTLI_INTERNAL void BrotliCompressFragmentTwoPass(MemoryManager* m,
const uint8_t* input,
size_t input_size,
BROTLI_BOOL is_last,
uint32_t* command_buf,
uint8_t* literal_buf,
int* table,
size_t table_size,
size_t* storage_ix,
uint8_t* storage);
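/* Illustrative call sketch (not from the original header; the buffer names
   match the parameters above and are assumed to be set up elsewhere as the
   REQUIRES demand):

     size_t storage_ix = 0;
     BrotliCompressFragmentTwoPass(m, input, input_size, BROTLI_TRUE,
                                   command_buf, literal_buf, table,
                                   1u << 10, &storage_ix, storage);

   Here "table" has (1 << 10) zero-initialized int entries, "command_buf" and
   "literal_buf" each hold at least kCompressFragmentTwoPassBlockSize elements,
   and "storage" is the output buffer whose bit position is tracked in
   "storage_ix". */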
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ */

View File

@@ -1,184 +0,0 @@
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions to map previous bytes into a context id. */
#ifndef BROTLI_ENC_CONTEXT_H_
#define BROTLI_ENC_CONTEXT_H_
#include "../common/platform.h"
#include <brotli/types.h>
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* Second-order context lookup table for UTF8 byte streams.
If p1 and p2 are the previous two bytes, we calculate the context as
context = kUTF8ContextLookup[p1] | kUTF8ContextLookup[p2 + 256].
If the previous two bytes are ASCII characters (i.e. < 128), this will be
equivalent to
context = 4 * context1(p1) + context2(p2),
where context1 is based on the previous byte in the following way:
0 : non-ASCII control
1 : \t, \n, \r
2 : space
3 : other punctuation
4 : " '
5 : %
6 : ( < [ {
7 : ) > ] }
8 : , ; :
9 : .
10 : =
11 : number
12 : upper-case vowel
13 : upper-case consonant
14 : lower-case vowel
15 : lower-case consonant
and context2 is based on the second last byte:
0 : control, space
1 : punctuation
2 : upper-case letter, number
3 : lower-case letter
If the last byte is ASCII, and the second last byte is not (in a valid UTF8
stream it will be a continuation byte, value between 128 and 191), the
context is the same as if the second last byte was an ASCII control or space.
If the last byte is a UTF8 lead byte (value >= 192), then the next byte will
be a continuation byte and the context id is 2 or 3 depending on the LSB of
the last byte and to a lesser extent on the second last byte if it is ASCII.
If the last byte is a UTF8 continuation byte, the second last byte can be:
- continuation byte: the next byte is probably ASCII or lead byte (assuming
4-byte UTF8 characters are rare) and the context id is 0 or 1.
- lead byte (192 - 207): next byte is ASCII or lead byte, context is 0 or 1
- lead byte (208 - 255): next byte is continuation byte, context is 2 or 3
The possible value combinations of the previous two bytes, the range of
context ids and the type of the next byte are summarized in the table below
(a worked example follows this comment):
|--------\-----------------------------------------------------------------|
| \ Last byte |
| Second \---------------------------------------------------------------|
| last byte \ ASCII | cont. byte | lead byte |
| \ (0-127) | (128-191) | (192-) |
|=============|===================|=====================|==================|
| ASCII | next: ASCII/lead | not valid | next: cont. |
| (0-127) | context: 4 - 63 | | context: 2 - 3 |
|-------------|-------------------|---------------------|------------------|
| cont. byte | next: ASCII/lead | next: ASCII/lead | next: cont. |
| (128-191) | context: 4 - 63 | context: 0 - 1 | context: 2 - 3 |
|-------------|-------------------|---------------------|------------------|
| lead byte | not valid | next: ASCII/lead | not valid |
| (192-207) | | context: 0 - 1 | |
|-------------|-------------------|---------------------|------------------|
| lead byte | not valid | next: cont. | not valid |
| (208-) | | context: 2 - 3 | |
|-------------|-------------------|---------------------|------------------|
*/
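/* Worked example for the formula above: with p1 = 'e' (0x65) and p2 = ' '
   (0x20), kUTF8ContextLookup[0x65] = 56 and kUTF8ContextLookup[0x20 + 256] = 0,
   so context = 56 | 0 = 56 = 4 * 14 + 0, i.e. a lower-case vowel preceded by
   a space. */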
static const uint8_t kUTF8ContextLookup[512] = {
/* Last byte. */
/* */
/* ASCII range. */
0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 4, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
8, 12, 16, 12, 12, 20, 12, 16, 24, 28, 12, 12, 32, 12, 36, 12,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 32, 32, 24, 40, 28, 12,
12, 48, 52, 52, 52, 48, 52, 52, 52, 48, 52, 52, 52, 52, 52, 48,
52, 52, 52, 52, 52, 48, 52, 52, 52, 52, 52, 24, 12, 28, 12, 12,
12, 56, 60, 60, 60, 56, 60, 60, 60, 56, 60, 60, 60, 60, 60, 56,
60, 60, 60, 60, 60, 56, 60, 60, 60, 60, 60, 24, 12, 28, 12, 0,
/* UTF8 continuation byte range. */
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
/* UTF8 lead byte range. */
2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
/* Second last byte. */
/* */
/* ASCII range. */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1,
1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 0,
/* UTF8 continuation byte range. */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* UTF8 lead byte range. */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
/* Context lookup table for small signed integers. */
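/* The table buckets the byte value: 0, 1..15, 16..63, 64..127, 128..191,
   192..239, 240..254 and 255 map to contexts 0..7 respectively. */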
static const uint8_t kSigned3BitContextLookup[] = {
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7,
};
typedef enum ContextType {
CONTEXT_LSB6 = 0,
CONTEXT_MSB6 = 1,
CONTEXT_UTF8 = 2,
CONTEXT_SIGNED = 3
} ContextType;
static BROTLI_INLINE uint8_t Context(uint8_t p1, uint8_t p2, ContextType mode) {
switch (mode) {
case CONTEXT_LSB6:
return p1 & 0x3f;
case CONTEXT_MSB6:
return (uint8_t)(p1 >> 2);
case CONTEXT_UTF8:
return kUTF8ContextLookup[p1] | kUTF8ContextLookup[p2 + 256];
case CONTEXT_SIGNED:
return (uint8_t)((kSigned3BitContextLookup[p1] << 3) +
kSigned3BitContextLookup[p2]);
default:
return 0;
}
}
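/* Example: Context(0x03, 0xFE, CONTEXT_SIGNED) = (1 << 3) + 6 = 14, since
   kSigned3BitContextLookup[0x03] = 1 and kSigned3BitContextLookup[0xFE] = 6. */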
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_CONTEXT_H_ */

Some files were not shown because too many files have changed in this diff.