// Copyright 2005 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "snappy.h"
#include "snappy-internal.h"
#include "snappy-sinksource.h"

#include <stdio.h>

#include <algorithm>
#include <string>
#include <vector>


namespace snappy {

// Any hash function will produce a valid compressed bitstream, but a good
// hash function reduces the number of collisions and thus yields better
// compression for compressible input, and more speed for incompressible
// input. Of course, it doesn't hurt if the hash function is reasonably fast
// either, as it gets called a lot.
static inline uint32 HashBytes(uint32 bytes, int shift) {
  uint32 kMul = 0x1e35a7bd;
  return (bytes * kMul) >> shift;
}
static inline uint32 Hash(const char* p, int shift) {
  return HashBytes(UNALIGNED_LOAD32(p), shift);
}
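
// Worked example (illustrative, not from the original source): for the
// smallest table of 256 entries, shift == 32 - Log2Floor(256) == 24, so
// HashBytes() keeps only the top 8 bits of the 32-bit product and every
// result lands in [0, table_size - 1]:
//
//   uint32 h = HashBytes(UNALIGNED_LOAD32("abcd"), 24);
//   assert(h < 256);  // always holds for shift == 24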

size_t MaxCompressedLength(size_t source_len) {
  // Compressed data can be defined as:
  //    compressed := item* literal*
  //    item       := literal* copy
  //
  // The trailing literal sequence has a space blowup of at most 62/60
  // since a literal of length 60 needs one tag byte + one extra byte
  // for length information.
  //
  // Item blowup is trickier to measure. Suppose the "copy" op copies
  // 4 bytes of data. Because of a special check in the encoding code,
  // we produce a 4-byte copy only if the offset is < 65536. Therefore
  // the copy op takes 3 bytes to encode, and this type of item leads
  // to at most the 62/60 blowup for representing literals.
  //
  // Suppose the "copy" op copies 5 bytes of data. If the offset is big
  // enough, it will take 5 bytes to encode the copy op. Therefore the
  // worst case here is a one-byte literal followed by a five-byte copy.
  // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
  //
  // This last factor dominates the blowup, so the final estimate is:
  return 32 + source_len + source_len/6;
}
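
// Worked example (illustrative): for a 60000-byte input the bound is
// 32 + 60000 + 60000/6 == 70032 bytes. The fixed 32-byte slack is also
// what lets EmitLiteral() below overwrite up to 15 bytes past the end of
// its output on the fast path.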

enum {
  LITERAL = 0,
  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
  COPY_2_BYTE_OFFSET = 2,
  COPY_4_BYTE_OFFSET = 3
};
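
// Tag byte layout example (illustrative): the low two bits of every tag
// byte select one of the four element types above, and the upper six bits
// carry type-specific data. E.g. a literal of length 5 gets the tag
// LITERAL | ((5-1) << 2) == 0x10, and a copy of length 8 with a two-byte
// offset gets COPY_2_BYTE_OFFSET | ((8-1) << 2) == 0x1e.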

// Copy "len" bytes from "src" to "op", one byte at a time.  Used for
// handling COPY operations where the input and output regions may
// overlap.  For example, suppose:
//    src    == "ab"
//    op     == src + 2
//    len    == 20
// After IncrementalCopy(src, op, len), the result will have
// eleven copies of "ab"
//    ababababababababababab
// Note that this does not match the semantics of either memcpy()
// or memmove().
static inline void IncrementalCopy(const char* src, char* op, int len) {
  DCHECK_GT(len, 0);
  do {
    *op++ = *src++;
  } while (--len > 0);
}

// Equivalent to IncrementalCopy except that it can write up to ten extra
// bytes after the end of the copy, and that it is faster.
//
// The main part of this loop is a simple copy of eight bytes at a time until
// we've copied (at least) the requested amount of bytes.  However, if op and
// src are less than eight bytes apart (indicating a repeating pattern of
// length < 8), we first need to expand the pattern in order to get the correct
// results.  For instance, if the buffer looks like this, with the eight-byte
// <src> and <op> patterns marked as intervals:
//
//    abxxxxxxxxxxxx
//    [------]           src
//      [------]         op
//
// a single eight-byte copy from <src> to <op> will repeat the pattern once,
// after which we can move <op> two bytes without moving <src>:
//
//    ababxxxxxxxxxx
//    [------]           src
//        [------]       op
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// op - src == 1 and len == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.

namespace {

const int kMaxIncrementCopyOverflow = 10;

}  // namespace

static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
  while (op - src < 8) {
    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
    len -= op - src;
    op += op - src;
  }
  while (len > 0) {
    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
    src += 8;
    op += 8;
    len -= 8;
  }
}
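
// Trace (illustrative): with op - src == 2 and len == 5 (a repeating
// two-byte pattern), the first loop runs twice: the gap grows 2 -> 4 -> 8
// while len drops 5 -> 3 -> -1, so the second loop never executes. Each
// iteration stores eight bytes, which is why callers must reserve
// kMaxIncrementCopyOverflow bytes of headroom past the nominal end.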

static inline char* EmitLiteral(char* op,
                                const char* literal,
                                int len,
                                bool allow_fast_path) {
  int n = len - 1;      // Zero-length literals are disallowed
  if (n < 60) {
    // Fits in tag byte
    *op++ = LITERAL | (n << 2);

    // The vast majority of copies are below 16 bytes, for which a
    // call to memcpy is overkill. This fast path can sometimes
    // copy up to 15 bytes too much, but that is okay in the
    // main loop, since we have a bit to go on for both sides:
    //
    //   - The input will always have kInputMarginBytes = 15 extra
    //     available bytes, as long as we're in the main loop, and
    //     if not, allow_fast_path = false.
    //   - The output will always have 32 spare bytes (see
    //     MaxCompressedLength).
    if (allow_fast_path && len <= 16) {
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
      return op + len;
    }
  } else {
    // Encode in upcoming bytes
    char* base = op;
    int count = 0;
    op++;
    while (n > 0) {
      *op++ = n & 0xff;
      n >>= 8;
      count++;
    }
    assert(count >= 1);
    assert(count <= 4);
    *base = LITERAL | ((59+count) << 2);
  }
  memcpy(op, literal, len);
  return op + len;
}

static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
  DCHECK_LE(len, 64);
  DCHECK_GE(len, 4);
  DCHECK_LT(offset, 65536);

  if ((len < 12) && (offset < 2048)) {
    size_t len_minus_4 = len - 4;
    assert(len_minus_4 < 8);            // Must fit in 3 bits
    *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
    *op++ = offset & 0xff;
  } else {
    *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
    LittleEndian::Store16(op, offset);
    op += 2;
  }
  return op;
}
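
// Encoding example (illustrative): EmitCopyLessThan64(op, 1000, 8) takes
// the short form, since len < 12 and offset < 2048. It emits the tag byte
// COPY_1_BYTE_OFFSET | ((8-4) << 2) | ((1000 >> 8) << 5) == 0x71 followed
// by 1000 & 0xff == 0xe8, i.e. the two bytes 0x71 0xe8.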

static inline char* EmitCopy(char* op, size_t offset, int len) {
  // Emit 64-byte copies, but make sure to keep at least four bytes
  // reserved for the final copy (EmitCopyLessThan64 requires len >= 4).
  while (len >= 68) {
    op = EmitCopyLessThan64(op, offset, 64);
    len -= 64;
  }

  // Emit an extra 60-byte copy if we have too much data to fit in one copy
  if (len > 64) {
    op = EmitCopyLessThan64(op, offset, 60);
    len -= 60;
  }

  // Emit remainder
  op = EmitCopyLessThan64(op, offset, len);
  return op;
}


bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
  uint32 v = 0;
  const char* limit = start + n;
  if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
    *result = v;
    return true;
  } else {
    return false;
  }
}

namespace internal {
uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
  // Use smaller hash table when input.size() is smaller, since we
  // fill the table, incurring O(hash table size) overhead for
  // compression, and if the input is short, we won't need that
  // many hash table entries anyway.
  assert(kMaxHashTableSize >= 256);
  size_t htsize = 256;
  while (htsize < kMaxHashTableSize && htsize < input_size) {
    htsize <<= 1;
  }
  CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
  CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";

  uint16* table;
  if (htsize <= ARRAYSIZE(small_table_)) {
    table = small_table_;
  } else {
    if (large_table_ == NULL) {
      large_table_ = new uint16[kMaxHashTableSize];
    }
    table = large_table_;
  }

  *table_size = htsize;
  memset(table, 0, htsize * sizeof(*table));
  return table;
}
}  // end namespace internal

// For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
// equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
// empirically found that overlapping loads such as
//  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
  DCHECK(0 <= offset && offset <= 4) << offset;
  return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
}

// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
// REQUIRES: "input" is at most "kBlockSize" bytes long.
// REQUIRES: "op" points to an array of memory that is at least
// "MaxCompressedLength(input.size())" in size.
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
// REQUIRES: "table_size" is a power of two
//
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
namespace internal {
char* CompressFragment(const char* input,
                       size_t input_size,
                       char* op,
                       uint16* table,
                       const int table_size) {
  // "ip" is the input pointer, and "op" is the output pointer.
  const char* ip = input;
  CHECK_LE(input_size, kBlockSize);
  CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
  const int shift = 32 - Bits::Log2Floor(table_size);
  DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
  const char* ip_end = input + input_size;
  const char* base_ip = ip;
  // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
  // [next_emit, ip_end) after the main loop.
  const char* next_emit = ip;

  const size_t kInputMarginBytes = 15;
  if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
    const char* ip_limit = input + input_size - kInputMarginBytes;

    for (uint32 next_hash = Hash(++ip, shift); ; ) {
      DCHECK_LT(next_emit, ip);
      // The body of this loop calls EmitLiteral once and then EmitCopy one or
      // more times.  (The exception is that when we're close to exhausting
      // the input we goto emit_remainder.)
      //
      // In the first iteration of this loop we're just starting, so
      // there's nothing to copy, so calling EmitLiteral once is
      // necessary.  And we only start a new iteration when the
      // current iteration has determined that a call to EmitLiteral will
      // precede the next call to EmitCopy (if any).
      //
      // Step 1: Scan forward in the input looking for a 4-byte-long match.
      // If we get close to exhausting the input then goto emit_remainder.
      //
      // Heuristic match skipping: If 32 bytes are scanned with no matches
      // found, start looking only at every other byte. If 32 more bytes are
      // scanned, look at every third byte, etc. When a match is found,
      // immediately go back to looking at every byte. This is a small loss
      // (~5% performance, ~0.1% density) for compressible data due to more
      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
      // win since the compressor quickly "realizes" the data is incompressible
      // and doesn't bother looking for matches everywhere.
      //
      // The "skip" variable keeps track of how many bytes there are since the
      // last match; dividing it by 32 (ie. right-shifting by five) gives the
      // number of bytes to move ahead for each iteration.
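      //
      // Concretely (illustrative): after 32 consecutive misses skip has
      // reached 64, so bytes_between_hash_lookups == 64 >> 5 == 2 and we
      // probe every other byte; 32 misses later skip is 96 and we probe
      // every third byte, and so on.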
      uint32 skip = 32;

      const char* next_ip = ip;
      const char* candidate;
      do {
        ip = next_ip;
        uint32 hash = next_hash;
        DCHECK_EQ(hash, Hash(ip, shift));
        uint32 bytes_between_hash_lookups = skip++ >> 5;
        next_ip = ip + bytes_between_hash_lookups;
        if (PREDICT_FALSE(next_ip > ip_limit)) {
          goto emit_remainder;
        }
        next_hash = Hash(next_ip, shift);
        candidate = base_ip + table[hash];
        DCHECK_GE(candidate, base_ip);
        DCHECK_LT(candidate, ip);

        table[hash] = ip - base_ip;
      } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
                            UNALIGNED_LOAD32(candidate)));

      // Step 2: A 4-byte match has been found.  We'll later see if more
      // than 4 bytes match.  But, prior to the match, input
      // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
      DCHECK_LE(next_emit + 16, ip_end);
      op = EmitLiteral(op, next_emit, ip - next_emit, true);

      // Step 3: Call EmitCopy, and then see if another EmitCopy could
      // be our next move.  Repeat until we find no match for the
      // input immediately after what was consumed by the last EmitCopy call.
      //
      // If we exit this loop normally then we need to call EmitLiteral next,
      // though we don't yet know how big the literal will be.  We handle that
      // by proceeding to the next iteration of the main loop.  We also can
      // exit this loop via goto if we get close to exhausting the input.
      uint64 input_bytes = 0;
      uint32 candidate_bytes = 0;

      do {
        // We have a 4-byte match at ip, and no need to emit any
        // "literal bytes" prior to ip.
        const char* base = ip;
        int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
        ip += matched;
        size_t offset = base - candidate;
        DCHECK_EQ(0, memcmp(base, candidate, matched));
        op = EmitCopy(op, offset, matched);
        // We could immediately start working at ip now, but to improve
        // compression we first update table[Hash(ip - 1, ...)].
        const char* insert_tail = ip - 1;
        next_emit = ip;
        if (PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        input_bytes = UNALIGNED_LOAD64(insert_tail);
        uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
        table[prev_hash] = ip - base_ip - 1;
        uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
        candidate = base_ip + table[cur_hash];
        candidate_bytes = UNALIGNED_LOAD32(candidate);
        table[cur_hash] = ip - base_ip;
      } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);

      next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
      ++ip;
    }
  }

 emit_remainder:
  // Emit the remaining bytes as a literal
  if (next_emit < ip_end) {
    op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
  }

  return op;
}
}  // end namespace internal

// Signature of output types needed by decompression code.
// The decompression code is templatized on a type that obeys this
// signature so that we do not pay virtual function call overhead in
// the middle of a tight decompression loop.
//
// class DecompressionWriter {
//  public:
//   // Called before decompression
//   void SetExpectedLength(size_t length);
//
//   // Called after decompression
//   bool CheckLength() const;
//
//   // Called repeatedly during decompression
//   bool Append(const char* ip, size_t length);
//   bool AppendFromSelf(uint32 offset, size_t length);
//
//   // The difference between TryFastAppend and Append is that TryFastAppend
//   // is allowed to read up to <available> bytes from the input buffer,
//   // whereas Append is allowed to read <length>.
//   //
//   // Also, TryFastAppend is allowed to return false, declining the append,
//   // without it being a fatal error -- just "return false" would be
//   // a perfectly legal implementation of TryFastAppend. The intention
//   // is for TryFastAppend to allow a fast path in the common case of
//   // a small append.
//   //
//   // NOTE(user): TryFastAppend must always decline (return false)
//   // if <length> is 61 or more, as in this case the literal length is not
//   // decoded fully. In practice, this should not be a big problem,
//   // as it is unlikely that one would implement a fast path accepting
//   // this much data.
//   bool TryFastAppend(const char* ip, size_t available, size_t length);
// };

// -----------------------------------------------------------------------
// Lookup table for decompression code.  Generated by ComputeTable() below.
// -----------------------------------------------------------------------

// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
static const uint32 wordmask[] = {
  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
};
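
// Usage example (illustrative): a tag followed by two extra offset bytes
// has entry >> 11 == 2, so its little-endian trailer is masked with
// wordmask[2] == 0xffff and only those two bytes contribute to the
// decoded value.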

// Data stored per entry in lookup table:
//      Range   Bits-used       Description
//      ------------------------------------
//      1..64   0..7            Literal/copy length encoded in opcode byte
//      0..7    8..10           Copy offset encoded in opcode byte / 256
//      0..4    11..13          Extra bytes after opcode
//
// We use eight bits for the length even though 7 would have sufficed
// because of efficiency reasons:
//      (1) Extracting a byte is faster than a bit-field
//      (2) It properly aligns copy offset so we do not need a <<8
static const uint16 char_table[256] = {
  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
  0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
  0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
  0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
  0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
  0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
  0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
  0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
  0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
  0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
  0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
  0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
  0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
  0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
  0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
  0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
  0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
  0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
  0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
  0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
  0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
  0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
  0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
  0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
  0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
  0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
};
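
// Decoding example (illustrative): char_table[0x01] == 0x0804. Unpacked
// per the layout above: length == 0x0804 & 0xff == 4, copy offset / 256 ==
// (0x0804 >> 8) & 7 == 0, and extra bytes == 0x0804 >> 11 == 1. That is
// exactly opcode 0x01: a COPY_1_BYTE_OFFSET of length 4 with one trailing
// offset byte.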

// In debug mode, allow optional computation of the table at startup.
// Also, check that the decompression table is correct.
#ifndef NDEBUG
DEFINE_bool(snappy_dump_decompression_table, false,
            "If true, we print the decompression table at startup.");

static uint16 MakeEntry(unsigned int extra,
                        unsigned int len,
                        unsigned int copy_offset) {
  // Check that all of the fields fit within the allocated space
  DCHECK_EQ(extra, extra & 0x7);              // At most 3 bits
  DCHECK_EQ(copy_offset, copy_offset & 0x7);  // At most 3 bits
  DCHECK_EQ(len, len & 0x7f);                 // At most 7 bits
  return len | (copy_offset << 8) | (extra << 11);
}

static void ComputeTable() {
  uint16 dst[256];

  // Place invalid entries in all places to detect missing initialization
  int assigned = 0;
  for (int i = 0; i < 256; i++) {
    dst[i] = 0xffff;
  }

  // Small LITERAL entries.  We store (len-1) in the top 6 bits.
  for (unsigned int len = 1; len <= 60; len++) {
    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
    assigned++;
  }

  // Large LITERAL entries.  We use 60..63 in the high 6 bits to
  // encode the number of bytes of length info that follow the opcode.
  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
    // We set the length field in the lookup table to 1 because extra
    // bytes encode len-1.
    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
    assigned++;
  }

  // COPY_1_BYTE_OFFSET.
  //
  // The tag byte in the compressed data stores len-4 in 3 bits, and
  // offset/256 in 5 bits.  offset%256 is stored in the next byte.
  //
  // This format is used for length in range [4..11] and offset in
  // range [0..2047]
  for (unsigned int len = 4; len < 12; len++) {
    for (unsigned int offset = 0; offset < 2048; offset += 256) {
      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
        MakeEntry(1, len, offset>>8);
      assigned++;
    }
  }

  // COPY_2_BYTE_OFFSET.
  // Tag contains len-1 in top 6 bits, and offset in next two bytes.
  for (unsigned int len = 1; len <= 64; len++) {
    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
    assigned++;
  }

  // COPY_4_BYTE_OFFSET.
  // Tag contains len-1 in top 6 bits, and offset in next four bytes.
  for (unsigned int len = 1; len <= 64; len++) {
    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
    assigned++;
  }

  // Check that each entry was initialized exactly once.
  CHECK_EQ(assigned, 256);
  for (int i = 0; i < 256; i++) {
    CHECK_NE(dst[i], 0xffff);
  }

  if (FLAGS_snappy_dump_decompression_table) {
    printf("static const uint16 char_table[256] = {\n  ");
    for (int i = 0; i < 256; i++) {
      printf("0x%04x%s",
             dst[i],
             ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n  " : ", ")));
    }
    printf("};\n");
  }

  // Check that the computed table matches the recorded table
  for (int i = 0; i < 256; i++) {
    CHECK_EQ(dst[i], char_table[i]);
  }
}
#endif /* !NDEBUG */

// Helper class for decompression
class SnappyDecompressor {
 private:
  Source*       reader_;        // Underlying source of bytes to decompress
  const char*   ip_;            // Points to next buffered byte
  const char*   ip_limit_;      // Points just past buffered bytes
  uint32        peeked_;        // Bytes peeked from reader (need to skip)
  bool          eof_;           // Hit end of input without an error?
  char          scratch_[5];    // Temporary buffer for PeekFast() boundaries

  // Ensure that all of the tag metadata for the next tag is available
  // in [ip_..ip_limit_-1].  Also ensures that [ip,ip+4] is readable even
  // if (ip_limit_ - ip_ < 5).
  //
  // Returns true on success, false on error or end of input.
  bool RefillTag();

 public:
  explicit SnappyDecompressor(Source* reader)
      : reader_(reader),
        ip_(NULL),
        ip_limit_(NULL),
        peeked_(0),
        eof_(false) {
  }

  ~SnappyDecompressor() {
    // Advance past any bytes we peeked at from the reader
    reader_->Skip(peeked_);
  }

  // Returns true iff we have hit the end of the input without an error.
  bool eof() const {
    return eof_;
  }

  // Read the uncompressed length stored at the start of the compressed data.
  // On success, stores the length in *result and returns true.
  // On failure, returns false.
  bool ReadUncompressedLength(uint32* result) {
    DCHECK(ip_ == NULL);       // Must not have read anything yet
    // Length is encoded in 1..5 bytes
    *result = 0;
    uint32 shift = 0;
    while (true) {
      if (shift >= 32) return false;
      size_t n;
      const char* ip = reader_->Peek(&n);
      if (n == 0) return false;
      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
      reader_->Skip(1);
      *result |= static_cast<uint32>(c & 0x7f) << shift;
      if (c < 128) {
        break;
      }
      shift += 7;
    }
    return true;
  }
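
  // Varint example (illustrative): an uncompressed length of 320 arrives
  // as the two bytes 0xc0 0x02; the first contributes 0x40 with the
  // continuation bit set, the second contributes 2 << 7, and
  // 0x40 + (2 << 7) == 320.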

  // Process items from the input until an error occurs or the input is
  // exhausted. (Note that this method returns void; callers use eof() and
  // the writer's CheckLength() to distinguish success from failure.)
  template <class Writer>
  void DecompressAllTags(Writer* writer) {
    const char* ip = ip_;

    // We could have put this refill fragment only at the beginning of the loop.
    // However, duplicating it at the end of each branch gives the compiler more
    // scope to optimize the <ip_limit_ - ip> expression based on the local
    // context, which overall increases speed.
    #define MAYBE_REFILL() \
        if (ip_limit_ - ip < 5) { \
          ip_ = ip; \
          if (!RefillTag()) return; \
          ip = ip_; \
        }

    MAYBE_REFILL();
    for ( ;; ) {
      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));

      if ((c & 0x3) == LITERAL) {
        size_t literal_length = (c >> 2) + 1u;
        if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
          DCHECK_LT(literal_length, 61);
          ip += literal_length;
          MAYBE_REFILL();
          continue;
        }
        if (PREDICT_FALSE(literal_length >= 61)) {
          // Long literal.
          const size_t literal_length_length = literal_length - 60;
          literal_length =
              (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
          ip += literal_length_length;
        }

        size_t avail = ip_limit_ - ip;
        while (avail < literal_length) {
          if (!writer->Append(ip, avail)) return;
          literal_length -= avail;
          reader_->Skip(peeked_);
          size_t n;
          ip = reader_->Peek(&n);
          avail = n;
          peeked_ = avail;
          if (avail == 0) return;  // Premature end of input
          ip_limit_ = ip + avail;
        }
        if (!writer->Append(ip, literal_length)) {
          return;
        }
        ip += literal_length;
        MAYBE_REFILL();
      } else {
        const uint32 entry = char_table[c];
        const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
        const uint32 length = entry & 0xff;
        ip += entry >> 11;

        // copy_offset/256 is encoded in bits 8..10.  By just fetching
        // those bits, we get copy_offset (since the bit-field starts at
        // bit 8).
        const uint32 copy_offset = entry & 0x700;
        if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
          return;
        }
        MAYBE_REFILL();
      }
    }

#undef MAYBE_REFILL
  }
};

bool SnappyDecompressor::RefillTag() {
  const char* ip = ip_;
  if (ip == ip_limit_) {
    // Fetch a new fragment from the reader
    reader_->Skip(peeked_);   // All peeked bytes are used up
    size_t n;
    ip = reader_->Peek(&n);
    peeked_ = n;
    if (n == 0) {
      eof_ = true;
      return false;
    }
    ip_limit_ = ip + n;
  }

  // Read the tag character
  DCHECK_LT(ip, ip_limit_);
  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
  const uint32 entry = char_table[c];
  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
  DCHECK_LE(needed, sizeof(scratch_));

  // Read more bytes from reader if needed
  uint32 nbuf = ip_limit_ - ip;
  if (nbuf < needed) {
    // Stitch together bytes from ip and reader to form the word
    // contents.  We store the needed bytes in "scratch_".  They
    // will be consumed immediately by the caller since we do not
    // read more than we need.
    memmove(scratch_, ip, nbuf);
    reader_->Skip(peeked_);   // All peeked bytes are used up
    peeked_ = 0;
    while (nbuf < needed) {
      size_t length;
      const char* src = reader_->Peek(&length);
      if (length == 0) return false;
      uint32 to_add = min<uint32>(needed - nbuf, length);
      memcpy(scratch_ + nbuf, src, to_add);
      nbuf += to_add;
      reader_->Skip(to_add);
    }
    DCHECK_EQ(nbuf, needed);
    ip_ = scratch_;
    ip_limit_ = scratch_ + needed;
  } else if (nbuf < 5) {
    // Have enough bytes, but move into scratch_ so that we do not
    // read past end of input
    memmove(scratch_, ip, nbuf);
    reader_->Skip(peeked_);   // All peeked bytes are used up
    peeked_ = 0;
    ip_ = scratch_;
    ip_limit_ = scratch_ + nbuf;
  } else {
    // Pass pointer to buffer returned by reader_.
    ip_ = ip;
  }
  return true;
}

template <typename Writer>
static bool InternalUncompress(Source* r,
                               Writer* writer,
                               uint32 max_len) {
  // Read the uncompressed length from the front of the compressed input
  SnappyDecompressor decompressor(r);
  uint32 uncompressed_len = 0;
  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
  // Protect against possible DoS attack
  if (static_cast<uint64>(uncompressed_len) > max_len) {
    return false;
  }

  writer->SetExpectedLength(uncompressed_len);

  // Process the entire input
  decompressor.DecompressAllTags(writer);
  return (decompressor.eof() && writer->CheckLength());
}

bool GetUncompressedLength(Source* source, uint32* result) {
  SnappyDecompressor decompressor(source);
  return decompressor.ReadUncompressedLength(result);
}

size_t Compress(Source* reader, Sink* writer) {
  size_t written = 0;
  size_t N = reader->Available();
  char ulength[Varint::kMax32];
  char* p = Varint::Encode32(ulength, N);
  writer->Append(ulength, p-ulength);
  written += (p - ulength);

  internal::WorkingMemory wmem;
  char* scratch = NULL;
  char* scratch_output = NULL;

  while (N > 0) {
    // Get next block to compress (without copying if possible)
    size_t fragment_size;
    const char* fragment = reader->Peek(&fragment_size);
    DCHECK_NE(fragment_size, 0) << ": premature end of input";
    const size_t num_to_read = min(N, kBlockSize);
    size_t bytes_read = fragment_size;

    size_t pending_advance = 0;
    if (bytes_read >= num_to_read) {
      // Buffer returned by reader is large enough
      pending_advance = num_to_read;
      fragment_size = num_to_read;
    } else {
      // Read into scratch buffer
      if (scratch == NULL) {
        // If this is the last iteration, we want to allocate N bytes
        // of space, otherwise the max possible kBlockSize space.
        // num_to_read contains exactly the correct value
        scratch = new char[num_to_read];
      }
      memcpy(scratch, fragment, bytes_read);
      reader->Skip(bytes_read);

      while (bytes_read < num_to_read) {
        fragment = reader->Peek(&fragment_size);
        size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
        memcpy(scratch + bytes_read, fragment, n);
        bytes_read += n;
        reader->Skip(n);
      }
      DCHECK_EQ(bytes_read, num_to_read);
      fragment = scratch;
      fragment_size = num_to_read;
    }
    DCHECK_EQ(fragment_size, num_to_read);

    // Get encoding table for compression
    int table_size;
    uint16* table = wmem.GetHashTable(num_to_read, &table_size);

    // Compress input_fragment and append to dest
    const int max_output = MaxCompressedLength(num_to_read);

    // Need a scratch buffer for the output, in case the byte sink doesn't
    // have room for us directly.
    if (scratch_output == NULL) {
      scratch_output = new char[max_output];
    } else {
      // Since we encode kBlockSize regions followed by a region
      // which is <= kBlockSize in length, a previously allocated
      // scratch_output[] region is big enough for this iteration.
    }
    char* dest = writer->GetAppendBuffer(max_output, scratch_output);
    char* end = internal::CompressFragment(fragment, fragment_size,
                                           dest, table, table_size);
    writer->Append(dest, end - dest);
    written += (end - dest);

    N -= num_to_read;
    reader->Skip(pending_advance);
  }

  delete[] scratch;
  delete[] scratch_output;

  return written;
}

// -----------------------------------------------------------------------
// Flat array interfaces
// -----------------------------------------------------------------------

// A type that writes to a flat array.
// Note that this is not a "ByteSink", but a type that matches the
// Writer template argument to SnappyDecompressor::DecompressAllTags().
class SnappyArrayWriter {
 private:
  char* base_;
  char* op_;
  char* op_limit_;

 public:
  inline explicit SnappyArrayWriter(char* dst)
      : base_(dst),
        op_(dst) {
  }

  inline void SetExpectedLength(size_t len) {
    op_limit_ = op_ + len;
  }

  inline bool CheckLength() const {
    return op_ == op_limit_;
  }

  inline bool Append(const char* ip, size_t len) {
    char* op = op_;
    const size_t space_left = op_limit_ - op;
    if (space_left < len) {
      return false;
    }
    memcpy(op, ip, len);
    op_ = op + len;
    return true;
  }

  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
    char* op = op_;
    const size_t space_left = op_limit_ - op;
    if (len <= 16 && available >= 16 && space_left >= 16) {
      // Fast path, used for the majority (about 95%) of invocations.
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
      op_ = op + len;
      return true;
    } else {
      return false;
    }
  }

  inline bool AppendFromSelf(size_t offset, size_t len) {
    char* op = op_;
    const size_t space_left = op_limit_ - op;

    if (op - base_ <= offset - 1u) {  // -1u catches offset==0
      return false;
    }
    if (len <= 16 && offset >= 8 && space_left >= 16) {
      // Fast path, used for the majority (70-80%) of dynamic invocations.
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
    } else {
      if (space_left >= len + kMaxIncrementCopyOverflow) {
        IncrementalCopyFastPath(op - offset, op, len);
      } else {
        if (space_left < len) {
          return false;
        }
        IncrementalCopy(op - offset, op, len);
      }
    }

    op_ = op + len;
    return true;
  }
};

bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
  ByteArraySource reader(compressed, n);
  return RawUncompress(&reader, uncompressed);
}

bool RawUncompress(Source* compressed, char* uncompressed) {
  SnappyArrayWriter output(uncompressed);
  return InternalUncompress(compressed, &output, kuint32max);
}

bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
  size_t ulength;
  if (!GetUncompressedLength(compressed, n, &ulength)) {
    return false;
  }
  // Protect against possible DoS attack
  if ((static_cast<uint64>(ulength) + uncompressed->size()) >
      uncompressed->max_size()) {
    return false;
  }
  STLStringResizeUninitialized(uncompressed, ulength);
  return RawUncompress(compressed, n, string_as_array(uncompressed));
}


// A Writer that drops everything on the floor and just does validation
class SnappyDecompressionValidator {
 private:
  size_t expected_;
  size_t produced_;

 public:
  inline SnappyDecompressionValidator() : produced_(0) { }
  inline void SetExpectedLength(size_t len) {
    expected_ = len;
  }
  inline bool CheckLength() const {
    return expected_ == produced_;
  }
  inline bool Append(const char* ip, size_t len) {
    produced_ += len;
    return produced_ <= expected_;
  }
  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
    return false;
  }
  inline bool AppendFromSelf(size_t offset, size_t len) {
    if (produced_ <= offset - 1u) return false;  // -1u catches offset==0
    produced_ += len;
    return produced_ <= expected_;
  }
};

bool IsValidCompressedBuffer(const char* compressed, size_t n) {
  ByteArraySource reader(compressed, n);
  SnappyDecompressionValidator writer;
  return InternalUncompress(&reader, &writer, kuint32max);
}

void RawCompress(const char* input,
                 size_t input_length,
                 char* compressed,
                 size_t* compressed_length) {
  ByteArraySource reader(input, input_length);
  UncheckedByteArraySink writer(compressed);
  Compress(&reader, &writer);

  // Compute how many bytes were added
  *compressed_length = (writer.CurrentDestination() - compressed);
}

size_t Compress(const char* input, size_t input_length, string* compressed) {
  // Pre-grow the buffer to the max length of the compressed output
  compressed->resize(MaxCompressedLength(input_length));

  size_t compressed_length;
  RawCompress(input, input_length, string_as_array(compressed),
              &compressed_length);
  compressed->resize(compressed_length);
  return compressed_length;
}


}  // end namespace snappy