// enc_best.go
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

import (
	"fmt"
	"math/bits"
)

const (
	bestLongTableBits = 20                     // Bits used in the long match table
	bestLongTableSize = 1 << bestLongTableBits // Size of the table

	// Note: Increasing the short table bits or making the hash shorter
	// can actually lead to compression degradation since it will 'steal' more from the
	// long match table and match offsets are quite big.
	// This greatly depends on the type of input.
	bestShortTableBits = 16                     // Bits used in the short match table
	bestShortTableSize = 1 << bestShortTableBits // Size of the table
)
// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2.
// When we find a long match we choose between the two values and select the longest.
// When we find a short match, after checking the long, we check if we can find a long at n+1
// and that it is longer (lazy matching).
type bestFastEncoder struct {
	fastBase
	// table holds short-match candidates; each bucket keeps the newest
	// offset plus the previous one with the same hash (a 2-deep chain).
	table [bestShortTableSize]prevEntry
	// longTable is the same 2-deep chained layout for long (8-byte hash) matches.
	longTable [bestLongTableSize]prevEntry
	// dictTable / dictLongTable are snapshots seeded from a dictionary in
	// Reset, copied back into table/longTable on every reset with that dict.
	dictTable     []prevEntry
	dictLongTable []prevEntry
}
// Encode appends the sequences and literals for src to blk, trading speed for
// compression ratio: it consults both hash tables including their previous-entry
// chains, scores competing candidates against each other, probes the three
// recent repeat offsets, and re-checks one position ahead before committing.
func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (2)
		inputMargin            = 8 + 4
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		// With no history the tables can simply be cleared.
		if len(e.hist) == 0 {
			for i := range e.table[:] {
				e.table[i] = prevEntry{}
			}
			for i := range e.longTable[:] {
				e.longTable[i] = prevEntry{}
			}
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			v2 := e.table[i].prev
			if v < minOff {
				// Entry (and therefore its chain) is out of range; drop both.
				v = 0
				v2 = 0
			} else {
				v = v - e.cur + e.maxMatchOff
				if v2 < minOff {
					v2 = 0
				} else {
					v2 = v2 - e.cur + e.maxMatchOff
				}
			}
			e.table[i] = prevEntry{
				offset: v,
				prev:   v2,
			}
		}
		for i := range e.longTable[:] {
			v := e.longTable[i].offset
			v2 := e.longTable[i].prev
			if v < minOff {
				v = 0
				v2 = 0
			} else {
				v = v - e.cur + e.maxMatchOff
				if v2 < minOff {
					v2 = 0
				} else {
					v2 = v2 - e.cur + e.maxMatchOff
				}
			}
			e.longTable[i] = prevEntry{
				offset: v,
				prev:   v2,
			}
		}
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	// Tiny blocks are emitted as raw literals; no sequences.
	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	const kSearchStrength = 10

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])
	offset3 := int32(blk.recentOffsets[2])

	// addLiterals copies pending literals [nextEmit:until) into blk and
	// records the literal length on the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	_ = addLiterals
	if debug {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2

		if debugAsserts && canRepeat && offset1 == 0 {
			panic("offset0 was 0")
		}

		type match struct {
			offset int32 // absolute position in src where the match content starts
			s      int32 // position the match is for
			length int32 // total match length; 0 means "no match"
			rep    int32 // repeat index 1-3, or -1 for a normal (offset-coded) match
		}
		// matchAt reports the match at s against history position offset.
		// If the offset is out of range or the first 4 bytes differ, a
		// zero-length match is returned.
		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
				return match{offset: offset, s: s}
			}
			return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
		}

		// bestOf returns the higher-scoring match. The score favors length
		// and an earlier start position; non-repeat matches are penalized by
		// roughly one point per byte needed to represent their offset.
		bestOf := func(a, b match) match {
			aScore := b.s - a.s + a.length
			bScore := a.s - b.s + b.length
			if a.rep < 0 {
				aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8
			}
			if b.rep < 0 {
				bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8
			}
			if aScore >= bScore {
				return a
			}
			return b
		}
		const goodEnough = 100

		nextHashL := hash8(cv, bestLongTableBits)
		nextHashS := hash4x64(cv, bestShortTableBits)
		candidateL := e.longTable[nextHashL]
		candidateS := e.table[nextHashS]

		// Score all four table candidates (both chain entries of each table).
		best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
		best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
		best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
		if canRepeat && best.length < goodEnough {
			// Try the three repeat offsets at s+1, and if anything matched,
			// also at s+3 (cheap-to-code repeat matches are worth probing).
			best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1))
			best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2))
			best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3))
			if best.length > 0 {
				best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1))
				best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2))
				best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3))
			}
		}
		// Load next and check...
		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}

		// Look far ahead, unless we have a really long match already...
		if best.length < goodEnough {
			// No match found, move forward on input, no need to check forward...
			if best.length < 4 {
				// Step size grows with the distance since the last emit.
				s += 1 + (s-nextEmit)>>(kSearchStrength-1)
				if s >= sLimit {
					break encodeLoop
				}
				cv = load6432(src, s)
				continue
			}

			s++
			candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)]
			cv = load6432(src, s)
			cv2 := load6432(src, s+1)
			candidateL = e.longTable[hash8(cv, bestLongTableBits)]
			candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)]

			best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
			best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
			best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
			best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
			best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))

			// See if we can find a better match by checking where the current best ends.
			// Use that offset to see if we can find a better full match.
			if sAt := best.s + best.length; sAt < sLimit {
				nextHashL := hash8(load6432(src, sAt), bestLongTableBits)
				candidateEnd := e.longTable[nextHashL]
				if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
					bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
					if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
						bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
					}
					best = bestEnd
				}
			}
		}

		// We have a match, we can store the forward value
		if best.rep > 0 {
			// Repeat-offset match: encode with the short repeat code.
			s = best.s
			var seq seq
			seq.matchLen = uint32(best.length - zstdMinMatch)

			// We might be able to match backwards.
			// Extend as long as we can.
			start := best.s
			// We end the search early, so we don't risk 0 literals
			// and have to do special offset treatment.
			startLimit := nextEmit + 1

			tMin := s - e.maxMatchOff
			if tMin < 0 {
				tMin = 0
			}
			repIndex := best.offset
			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
				repIndex--
				start--
				seq.matchLen++
			}
			addLiterals(&seq, start)

			// rep 0
			seq.offset = uint32(best.rep)
			if debugSequences {
				println("repeat sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Index match start+1 (long) -> s - 1
			index0 := s
			s = best.s + best.length

			nextEmit = s
			if s >= sLimit {
				if debug {
					println("repeat ended", s, best.length)
				}
				break encodeLoop
			}
			// Index skipped...
			off := index0 + e.cur
			for index0 < s-1 {
				cv0 := load6432(src, index0)
				h0 := hash8(cv0, bestLongTableBits)
				h1 := hash4x64(cv0, bestShortTableBits)
				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
				e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
				off++
				index0++
			}
			// Rotate the recent offsets so the used one becomes offset1.
			switch best.rep {
			case 2:
				offset1, offset2 = offset2, offset1
			case 3:
				offset1, offset2, offset3 = offset3, offset1, offset2
			}
			cv = load6432(src, s)
			continue
		}

		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
		s = best.s
		t := best.offset
		offset1, offset2, offset3 = s-t, offset1, offset2

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && canRepeat && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the n-byte match as long as possible.
		l := best.length

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		// +3 skips the three repeat codes in the offset encoding.
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index match start+1 (long) -> s - 1
		index0 := s - l + 1
		// every entry
		for index0 < s-1 {
			cv0 := load6432(src, index0)
			h0 := hash8(cv0, bestLongTableBits)
			h1 := hash4x64(cv0, bestShortTableBits)
			off := index0 + e.cur
			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
			index0++
		}

		cv = load6432(src, s)
		if !canRepeat {
			continue
		}

		// Check offset 2
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			nextHashS := hash4x64(cv, bestShortTableBits)
			nextHashL := hash8(cv, bestLongTableBits)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
			e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	// Flush any remaining literals after the last sequence.
	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	// Persist the repeat offsets for the next block.
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	blk.recentOffsets[2] = uint32(offset3)
	if debug {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}
// EncodeNoHist will encode a block with no history and no following blocks.
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
// The best encoder has no separate fast path: it ensures history capacity
// and delegates to Encode.
func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	e.ensureHist(len(src))
	e.Encode(blk, src)
}
// Reset will reset the encoder state and set a dictionary if not nil.
// When d is non-nil, the short and long match tables are (re)seeded from the
// dictionary content the first time that dictionary is seen, then the cached
// snapshots are copied into the live tables.
func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
	e.resetBase(d, singleBlock)
	if d == nil {
		return
	}
	// Init or copy dict table
	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
		if len(e.dictTable) != len(e.table) {
			e.dictTable = make([]prevEntry, len(e.table))
		}
		end := int32(len(d.content)) - 8 + e.maxMatchOff
		// Each 8-byte load seeds four consecutive positions (i .. i+3),
		// so i advances by 4. Offsets are biased by e.maxMatchOff.
		for i := e.maxMatchOff; i < end; i += 4 {
			const hashLog = bestShortTableBits

			cv := load6432(d.content, i-e.maxMatchOff)
			nextHash := hash4x64(cv, hashLog)      // 0 -> 4
			nextHash1 := hash4x64(cv>>8, hashLog)  // 1 -> 5
			nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6
			nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7
			e.dictTable[nextHash] = prevEntry{
				prev:   e.dictTable[nextHash].offset,
				offset: i,
			}
			e.dictTable[nextHash1] = prevEntry{
				prev:   e.dictTable[nextHash1].offset,
				offset: i + 1,
			}
			e.dictTable[nextHash2] = prevEntry{
				prev:   e.dictTable[nextHash2].offset,
				offset: i + 2,
			}
			e.dictTable[nextHash3] = prevEntry{
				prev:   e.dictTable[nextHash3].offset,
				offset: i + 3,
			}
		}
		e.lastDictID = d.id
	}

	// Init or copy dict table
	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
		if len(e.dictLongTable) != len(e.longTable) {
			e.dictLongTable = make([]prevEntry, len(e.longTable))
		}
		if len(d.content) >= 8 {
			// Seed the first position, then roll the 8-byte window one
			// byte at a time over the rest of the dictionary content.
			cv := load6432(d.content, 0)
			h := hash8(cv, bestLongTableBits)
			e.dictLongTable[h] = prevEntry{
				offset: e.maxMatchOff,
				prev:   e.dictLongTable[h].offset,
			}

			end := int32(len(d.content)) - 8 + e.maxMatchOff
			off := 8 // First to read
			for i := e.maxMatchOff + 1; i < end; i++ {
				cv = cv>>8 | (uint64(d.content[off]) << 56)
				h := hash8(cv, bestLongTableBits)
				e.dictLongTable[h] = prevEntry{
					offset: i,
					prev:   e.dictLongTable[h].offset,
				}
				off++
			}
		}
		e.lastDictID = d.id
	}

	// Reset table to initial state
	copy(e.longTable[:], e.dictLongTable)

	e.cur = e.maxMatchOff
	// Reset table to initial state
	copy(e.table[:], e.dictTable)
}