FreeBSD kernel kern code
inflate.c
1 /*
2  * Most parts of this file are not covered by:
3  * ----------------------------------------------------------------------------
4  * "THE BEER-WARE LICENSE" (Revision 42):
5  * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
6  * can do whatever you want with this stuff. If we meet some day, and you think
7  * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
8  * ----------------------------------------------------------------------------
9  */
10 
11 #include <sys/cdefs.h>
12 __FBSDID("$BSDSUniX$");
13 
14 #include <sys/param.h>
15 #include <sys/inflate.h>
16 #ifdef _KERNEL
17 #include <sys/systm.h>
18 #include <sys/kernel.h>
19 #endif
20 #include <sys/malloc.h>
21 
22 #ifdef _KERNEL
23 static MALLOC_DEFINE(M_GZIP, "gzip_trees", "Gzip trees");
24 #endif
25 
26 /* needed to make inflate() work */
27 #define uch u_char
28 #define ush u_short
29 #define ulg u_long
30 
31 /* Stuff to make inflate() work */
32 #ifdef _KERNEL
33 #define memzero(dest,len) bzero(dest,len)
34 #endif
35 #define NOMEMCPY
36 #ifdef _KERNEL
37 #define FPRINTF printf
38 #else
39 extern void putstr (char *);
40 #define FPRINTF putstr
41 #endif
42 
43 #define FLUSH(x,y) { \
44  int foo = (*x->gz_output)(x->gz_private,x->gz_slide,y); \
45  if (foo) \
46  return foo; \
47  }
48 
49 static const int qflag = 0;
50 
51 #ifndef _KERNEL /* want to use this file in kzip also */
52 extern unsigned char *kzipmalloc (int);
53 extern void kzipfree (void*);
54 #define malloc(x, y, z) kzipmalloc((x))
55 #define free(x, y) kzipfree((x))
56 #endif
57 
58 /*
59  * This came from unzip-5.12. I have changed the flow to pass
60  * a structure pointer around, thus hopefully making it re-entrant.
61  * Poul-Henning
62  */
63 
64 /* inflate.c -- put in the public domain by Mark Adler
65  version c14o, 23 August 1994 */
66 
67 /* You can do whatever you like with this source file, though I would
68  prefer that if you modify it and redistribute it that you include
69  comments to that effect with your name and the date. Thank you.
70 
71  History:
72  vers date who what
73  ---- --------- -------------- ------------------------------------
74  a ~~ Feb 92 M. Adler used full (large, one-step) lookup table
75  b1 21 Mar 92 M. Adler first version with partial lookup tables
76  b2 21 Mar 92 M. Adler fixed bug in fixed-code blocks
77  b3 22 Mar 92 M. Adler sped up match copies, cleaned up some
78  b4 25 Mar 92 M. Adler added prototypes; removed window[] (now
79  is the responsibility of unzip.h--also
80  changed name to slide[]), so needs diffs
81  for unzip.c and unzip.h (this allows
82  compiling in the small model on MSDOS);
83  fixed cast of q in huft_build();
84  b5 26 Mar 92 M. Adler got rid of unintended macro recursion.
85  b6 27 Mar 92 M. Adler got rid of nextbyte() routine. fixed
86  bug in inflate_fixed().
87  c1 30 Mar 92 M. Adler removed lbits, dbits environment variables.
88  changed BMAX to 16 for explode. Removed
89  OUTB usage, and replaced it with flush()--
90  this was a 20% speed improvement! Added
91  an explode.c (to replace unimplod.c) that
92  uses the huft routines here. Removed
93  register union.
94  c2 4 Apr 92 M. Adler fixed bug for file sizes a multiple of 32k.
95  c3 10 Apr 92 M. Adler reduced memory of code tables made by
96  huft_build significantly (factor of two to
97  three).
98  c4 15 Apr 92 M. Adler added NOMEMCPY to kill use of memcpy().
99  worked around a Turbo C optimization bug.
100  c5 21 Apr 92 M. Adler added the GZ_WSIZE #define to allow reducing
101  the 32K window size for specialized
102  applications.
103  c6 31 May 92 M. Adler added some typecasts to eliminate warnings
104  c7 27 Jun 92 G. Roelofs added some more typecasts (444: MSC bug).
105  c8 5 Oct 92 J-l. Gailly added ifdef'd code to deal with PKZIP bug.
106  c9 9 Oct 92 M. Adler removed a memory error message (~line 416).
107  c10 17 Oct 92 G. Roelofs changed ULONG/UWORD/byte to ulg/ush/uch,
108  removed old inflate, renamed inflate_entry
109  to inflate, added Mark's fix to a comment.
110  c10.5 14 Dec 92 M. Adler fix up error messages for incomplete trees.
111  c11 2 Jan 93 M. Adler fixed bug in detection of incomplete
112  tables, and removed assumption that EOB is
113  the longest code (bad assumption).
114  c12 3 Jan 93 M. Adler make tables for fixed blocks only once.
115  c13 5 Jan 93 M. Adler allow all zero length codes (pkzip 2.04c
116  outputs one zero length code for an empty
117  distance tree).
118  c14 12 Mar 93 M. Adler made inflate.c standalone with the
119  introduction of inflate.h.
120  c14b 16 Jul 93 G. Roelofs added (unsigned) typecast to w at 470.
121  c14c 19 Jul 93 J. Bush changed v[N_MAX], l[288], ll[28x+3x] arrays
122  to static for Amiga.
123  c14d 13 Aug 93 J-l. Gailly de-complicatified Mark's c[*p++]++ thing.
124  c14e 8 Oct 93 G. Roelofs changed memset() to memzero().
125  c14f 22 Oct 93 G. Roelofs renamed quietflg to qflag; made Trace()
126  conditional; added inflate_free().
127  c14g 28 Oct 93 G. Roelofs changed l/(lx+1) macro to pointer (Cray bug)
128  c14h 7 Dec 93 C. Ghisler huft_build() optimizations.
129  c14i 9 Jan 94 A. Verheijen set fixed_t{d,l} to NULL after freeing;
130  G. Roelofs check NEXTBYTE macro for GZ_EOF.
131  c14j 23 Jan 94 G. Roelofs removed Ghisler "optimizations"; ifdef'd
132  GZ_EOF check.
133  c14k 27 Feb 94 G. Roelofs added some typecasts to avoid warnings.
134  c14l 9 Apr 94 G. Roelofs fixed split comments on preprocessor lines
135  to avoid bug in Encore compiler.
136  c14m 7 Jul 94 P. Kienitz modified to allow assembler version of
137  inflate_codes() (define ASM_INFLATECODES)
138  c14n 22 Jul 94 G. Roelofs changed fprintf to FPRINTF for DLL versions
139  c14o 23 Aug 94 C. Spieler added a newline to a debug statement;
140  G. Roelofs added another typecast to avoid MSC warning
141  */
142 
143 
144 /*
145  Inflate deflated (PKZIP's method 8 compressed) data. The compression
146  method searches for as much of the current string of bytes (up to a
147  length of 258) in the previous 32K bytes. If it doesn't find any
148  matches (of at least length 3), it codes the next byte. Otherwise, it
149  codes the length of the matched string and its distance backwards from
150  the current position. There is a single Huffman code that codes both
151  single bytes (called "literals") and match lengths. A second Huffman
152  code codes the distance information, which follows a length code. Each
153  length or distance code actually represents a base value and a number
154  of "extra" (sometimes zero) bits to get to add to the base value. At
155  the end of each deflated block is a special end-of-block (EOB) literal/
156  length code. The decoding process is basically: get a literal/length
157  code; if EOB then done; if a literal, emit the decoded byte; if a
158  length then get the distance and emit the referred-to bytes from the
159  sliding window of previously emitted data.
160 
161  There are (currently) three kinds of inflate blocks: stored, fixed, and
162  dynamic. The compressor outputs a chunk of data at a time and decides
163  which method to use on a chunk-by-chunk basis. A chunk might typically
164  be 32K to 64K, uncompressed. If the chunk is uncompressible, then the
165  "stored" method is used. In this case, the bytes are simply stored as
166  is, eight bits per byte, with none of the above coding. The bytes are
167  preceded by a count, since there is no longer an EOB code.
168 
169  If the data is compressible, then either the fixed or dynamic methods
170  are used. In the dynamic method, the compressed data is preceded by
171  an encoding of the literal/length and distance Huffman codes that are
172  to be used to decode this block. The representation is itself Huffman
173  coded, and so is preceded by a description of that code. These code
174  descriptions take up a little space, and so for small blocks, there is
175  a predefined set of codes, called the fixed codes. The fixed method is
176  used if the block ends up smaller that way (usually for quite small
177  chunks); otherwise the dynamic method is used. In the latter case, the
178  codes are customized to the probabilities in the current block and so
179  can code it much better than the pre-determined fixed codes can.
180 
181  The Huffman codes themselves are decoded using a multi-level table
182  lookup, in order to maximize the speed of decoding plus the speed of
183  building the decoding tables. See the comments below that precede the
184  lbits and dbits tuning parameters.
185  */
186 
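The decoding loop just described can be sketched in a few lines. This is only an illustration of the control flow; decode_literal_length_symbol(), decode_match_length(), decode_match_distance(), emit_byte() and copy_from_window() are made-up helpers, not functions in this file:

extern unsigned decode_literal_length_symbol(void);   /* hypothetical: 0..285 */
extern unsigned decode_match_length(unsigned sym);    /* hypothetical: base + "extra" bits */
extern unsigned decode_match_distance(void);          /* hypothetical: distance code + extra bits */
extern void     emit_byte(unsigned char c);           /* hypothetical output sink */
extern void     copy_from_window(unsigned dist, unsigned len);

static void
decode_block_sketch(void)
{
	for (;;) {
		unsigned sym = decode_literal_length_symbol();

		if (sym == 256)          /* the end-of-block (EOB) code */
			break;
		if (sym < 256) {         /* a literal: emit the decoded byte */
			emit_byte((unsigned char)sym);
		} else {                 /* a length code: get the distance, then copy */
			unsigned len  = decode_match_length(sym);
			unsigned dist = decode_match_distance();

			copy_from_window(dist, len);  /* re-emit bytes from the sliding window */
		}
	}
}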
187 
188 /*
189  Notes beyond the 1.93a appnote.txt:
190 
191  1. Distance pointers never point before the beginning of the output
192  stream.
193  2. Distance pointers can point back across blocks, up to 32k away.
194  3. There is an implied maximum of 7 bits for the bit length table and
195  15 bits for the actual data.
196  4. If only one code exists, then it is encoded using one bit. (Zero
197  would be more efficient, but perhaps a little confusing.) If two
198  codes exist, they are coded using one bit each (0 and 1).
199  5. There is no way of sending zero distance codes--a dummy must be
200  sent if there are none. (History: a pre 2.0 version of PKZIP would
201  store blocks with no distance codes, but this was discovered to be
202  too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
203  zero distance codes, which is sent as one code of zero bits in
204  length.
205  6. There are up to 286 literal/length codes. Code 256 represents the
206  end-of-block. Note however that the static length tree defines
207  288 codes just to fill out the Huffman codes. Codes 286 and 287
208  cannot be used though, since there is no length base or extra bits
209  defined for them. Similarly, there are up to 30 distance codes.
210  However, static trees define 32 codes (all 5 bits) to fill out the
211  Huffman codes, but the last two had better not show up in the data.
212  7. Unzip can check dynamic Huffman blocks for complete code sets.
213  The exception is that a single code would not be complete (see #4).
214  8. The five bits following the block type is really the number of
215  literal codes sent minus 257.
216  9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
217  (1+6+6). Therefore, to output three times the length, you output
218  three codes (1+1+1), whereas to output four times the same length,
219  you only need two codes (1+3). Hmm.
220  10. In the tree reconstruction algorithm, Code = Code + Increment
221  only if BitLength(i) is not zero. (Pretty obvious.)
222  11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
223  12. Note: length code 284 can represent 227-258, but length code 285
224  really is 258. The last length deserves its own, short code
225  since it gets used a lot in very redundant files. The length
226  258 is special since 258 - 3 (the min match length) is 255.
227  13. The literal/length and distance code bit lengths are read as a
228  single stream of lengths. It is possible (and advantageous) for
229  a repeat code (16, 17, or 18) to go across the boundary between
230  the two sets of lengths.
231  */
232 
233 
234 #define PKZIP_BUG_WORKAROUND /* PKZIP 1.93a problem--live with it */
235 
236 /*
237  inflate.h must supply the uch slide[GZ_WSIZE] array and the NEXTBYTE,
238  FLUSH() and memzero macros. If the window size is not 32K, it
239  should also define GZ_WSIZE. If INFMOD is defined, it can include
240  compiled functions to support the NEXTBYTE and/or FLUSH() macros.
241  There are defaults for NEXTBYTE and FLUSH() below for use as
242  examples of what those functions need to do. Normally, you would
243  also want FLUSH() to compute a crc on the data. inflate.h also
244  needs to provide these typedefs:
245 
246  typedef unsigned char uch;
247  typedef unsigned short ush;
248  typedef unsigned long ulg;
249 
250  This module uses the external functions malloc() and free() (and
251  probably memset() or bzero() in the memzero() macro). Their
252  prototypes are normally found in <string.h> and <stdlib.h>.
253  */
254 #define INFMOD /* tell inflate.h to include code to be
255  * compiled */
256 
257 /* Huffman code lookup table entry--this entry is four bytes for machines
258  that have 16-bit pointers (e.g. PC's in the small or medium model).
259  Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16
260  means that v is a literal, 16 < e < 32 means that v is a pointer to
261  the next table, which codes e - 16 bits, and lastly e == 99 indicates
262  an unused code. If a code with e == 99 is looked up, this implies an
263  error in the data. */
264 struct huft {
265  uch e; /* number of extra bits or operation */
266  uch b; /* number of bits in this code or subcode */
267  union {
268  ush n; /* literal, length base, or distance
269  * base */
270  struct huft *t; /* pointer to next level of table */
271  } v;
272 };
273 
274 
275 /* Function prototypes */
276 static int huft_build(struct inflate *, unsigned *, unsigned, unsigned, const ush *, const ush *, struct huft **, int *);
277 static int huft_free(struct inflate *, struct huft *);
278 static int inflate_codes(struct inflate *, struct huft *, struct huft *, int, int);
279 static int inflate_stored(struct inflate *);
280 static int xinflate(struct inflate *);
281 static int inflate_fixed(struct inflate *);
282 static int inflate_dynamic(struct inflate *);
283 static int inflate_block(struct inflate *, int *);
284 
285 /* The inflate algorithm uses a sliding 32K byte window on the uncompressed
286  stream to find repeated byte strings. This is implemented here as a
287  circular buffer. The index is updated simply by incrementing and then
288  and'ing with 0x7fff (32K-1). */
289 /* It is left to other modules to supply the 32K area. It is assumed
290  to be usable as if it were declared "uch slide[32768];" or as just
291  "uch *slide;" and then malloc'ed in the latter case. The definition
292  must be in the header included above (unzip.h originally, <sys/inflate.h> here). */
293 
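Concretely, the wrap-around is just a power-of-two mask, which is what the copy loop in inflate_codes() below does with d &= GZ_WSIZE - 1 (GZ_WSIZE being the 32K window size supplied by the header):

	unsigned idx = 32767;                /* last slot of the 32K window     */
	idx = (idx + 1) & (GZ_WSIZE - 1);    /* 32768 & 0x7fff == 0: wraps to 0 */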
294 
295 /* Tables for deflate from PKZIP's appnote.txt. */
296 
297 /* Order of the bit length code lengths */
298 static const unsigned border[] = {
299  16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
300 
301 static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */
302  3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
303  35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
304  /* note: see note #13 above about the 258 in this list. */
305 
306 static const ush cplext[] = { /* Extra bits for literal codes 257..285 */
307  0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
308  3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
309 
310 static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */
311  1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
312  257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
313  8193, 12289, 16385, 24577};
314 
315 static const ush cpdext[] = { /* Extra bits for distance codes */
316  0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
317  7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
318  12, 12, 13, 13};
319 
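As a worked example of how these tables encode note #12 above: literal/length symbol 284 has base cplens[284 - 257] == 227 and cplext[284 - 257] == 5 extra bits, so it covers match lengths 227..258, while symbol 285 has base 258 with 0 extra bits, giving the maximum length its own short code. A purely illustrative helper, not something this file defines:

/* Match length for literal/length symbol sym (257..285), given the value
 * already read from its cplext[sym - 257] extra bits. */
static unsigned
match_length(unsigned sym, unsigned extra)
{
	return cplens[sym - 257] + extra;    /* e.g. match_length(284, 31) == 258 */
}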
320 /* And'ing with mask[n] masks the lower n bits */
321 static const ush mask[] = {
322  0x0000,
323  0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
324  0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
325 };
326 
327 
328 /* Macros for inflate() bit peeking and grabbing.
329  The usage is:
330 
331  NEEDBITS(glbl,j)
332  x = b & mask[j];
333  DUMPBITS(j)
334 
335  where NEEDBITS makes sure that b has at least j bits in it, and
336  DUMPBITS removes the bits from b. The macros use the variable k
337  for the number of bits in b. Normally, b and k are register
338  variables for speed, and are initialized at the beginning of a
339  routine that uses these macros from a global bit buffer and count.
340 
341  In order to not ask for more bits than there are in the compressed
342  stream, the Huffman tables are constructed to only ask for just
343  enough bits to make up the end-of-block code (value 256). Then no
344  bytes need to be "returned" to the buffer at the end of the last
345  block. See the huft_build() routine.
346  */
347 
348 /*
349  * The following 2 were global variables.
350  * They are now fields of the inflate structure.
351  */
352 
353 #define NEEDBITS(glbl,n) { \
354  while(k<(n)) { \
355  int c=(*glbl->gz_input)(glbl->gz_private); \
356  if(c==GZ_EOF) \
357  return 1; \
358  b|=((ulg)c)<<k; \
359  k+=8; \
360  } \
361  }
362 
363 #define DUMPBITS(n) {b>>=(n);k-=(n);}
364 
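For readers unfamiliar with this style of bit buffer, here is a small self-contained user-space analogue of the NEEDBITS/DUMPBITS discipline (illustrative only; struct bitstate and its fields are invented for the example and are not part of this driver):

#include <stddef.h>

struct bitstate {
	const unsigned char *src;       /* compressed input                 */
	size_t len, pos;                /* input size and read position     */
	unsigned long b;                /* bit buffer, filled LSB-first     */
	unsigned k;                     /* number of valid bits in b        */
};

/* Ensure at least n bits are buffered; 0 on success, 1 on end of input
 * (the same convention NEEDBITS uses with GZ_EOF above). */
static int
needbits(struct bitstate *s, unsigned n)
{
	while (s->k < n) {
		if (s->pos == s->len)
			return 1;
		s->b |= (unsigned long)s->src[s->pos++] << s->k;
		s->k += 8;
	}
	return 0;
}

/* Discard n bits once they have been examined with (b & mask[n]). */
static void
dumpbits(struct bitstate *s, unsigned n)
{
	s->b >>= n;
	s->k -= n;
}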
365 /*
366  Huffman code decoding is performed using a multi-level table lookup.
367  The fastest way to decode is to simply build a lookup table whose
368  size is determined by the longest code. However, the time it takes
369  to build this table can also be a factor if the data being decoded
370  is not very long. The most common codes are necessarily the
371  shortest codes, so those codes dominate the decoding time, and hence
372  the speed. The idea is you can have a shorter table that decodes the
373  shorter, more probable codes, and then point to subsidiary tables for
374  the longer codes. The time it costs to decode the longer codes is
375  then traded against the time it takes to make longer tables.
376 
377  The results of this tradeoff are in the variables lbits and dbits
378  below. lbits is the number of bits the first level table for literal/
379  length codes can decode in one step, and dbits is the same thing for
380  the distance codes. Subsequent tables are also less than or equal to
381  those sizes. These values may be adjusted either when all of the
382  codes are shorter than that, in which case the longest code length in
383  bits is used, or when the shortest code is *longer* than the requested
384  table size, in which case the length of the shortest code in bits is
385  used.
386 
387  There are two different values for the two tables, since they code a
388  different number of possibilities each. The literal/length table
389  codes 286 possible values, or in a flat code, a little over eight
390  bits. The distance table codes 30 possible values, or a little less
391  than five bits, flat. The optimum values for speed end up being
392  about one bit more than those, so lbits is 8+1 and dbits is 5+1.
393  The optimum values may differ though from machine to machine, and
394  possibly even between compilers. Your mileage may vary.
395  */
396 
397 static const int lbits = 9; /* bits in base literal/length lookup table */
398 static const int dbits = 6; /* bits in base distance lookup table */
399 
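Pulled out of the main loop, the multi-level walk that inflate_codes() performs with these tables looks roughly like this. It is a sketch only: it assumes the caller has already made sure enough bits sit in the bit buffer b, and it leaves dumping the final t->b bits to the caller, whereas the real code interleaves the walk with NEEDBITS/DUMPBITS:

/* Walk the lookup tables for one literal/length or distance symbol.
 * Returns the final e (15 == EOB, 16 == literal, otherwise an extra-bit
 * count) or -1 on an invalid code; *value receives t->v.n. */
static int
decode_one(struct huft *tl, int bl, ulg b, unsigned *value)
{
	struct huft *t = tl + ((unsigned)b & mask[bl]);
	unsigned e = t->e;

	while (e > 16) {                /* 16 < e < 32: entry points at a sub-table */
		if (e == 99)
			return -1;      /* unused code: error in the data */
		b >>= t->b;             /* drop the bits this level consumed */
		t = t->v.t + ((unsigned)b & mask[e - 16]);
		e = t->e;
	}
	*value = t->v.n;                /* literal value, length base, or distance base */
	return (int)e;
}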
400 
401 /* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
402 #define BMAX 16 /* maximum bit length of any code (16 for
403  * explode) */
404 #define N_MAX 288 /* maximum number of codes in any set */
405 
406 /* Given a list of code lengths and a maximum table size, make a set of
407  tables to decode that set of codes. Return zero on success, one if
408  the given code set is incomplete (the tables are still built in this
409  case), two if the input is invalid (all zero length codes or an
410  oversubscribed set of lengths), and three if not enough memory.
411  The code with value 256 is special, and the tables are constructed
412  so that no bits beyond that code are fetched when that code is
413  decoded. */
414 static int
415 huft_build(glbl, b, n, s, d, e, t, m)
416  struct inflate *glbl;
417  unsigned *b; /* code lengths in bits (all assumed <= BMAX) */
418  unsigned n; /* number of codes (assumed <= N_MAX) */
419  unsigned s; /* number of simple-valued codes (0..s-1) */
420  const ush *d; /* list of base values for non-simple codes */
421  const ush *e; /* list of extra bits for non-simple codes */
422  struct huft **t; /* result: starting table */
423  int *m; /* maximum lookup bits, returns actual */
424 {
425  unsigned a; /* counter for codes of length k */
426  unsigned c[BMAX + 1]; /* bit length count table */
427  unsigned el; /* length of EOB code (value 256) */
428  unsigned f; /* i repeats in table every f entries */
429  int g; /* maximum code length */
430  int h; /* table level */
431  register unsigned i; /* counter, current code */
432  register unsigned j; /* counter */
433  register int k; /* number of bits in current code */
434  int lx[BMAX + 1]; /* memory for l[-1..BMAX-1] */
435  int *l = lx + 1; /* stack of bits per table */
436  register unsigned *p; /* pointer into c[], b[], or v[] */
437  register struct huft *q;/* points to current table */
438  struct huft r; /* table entry for structure assignment */
439  struct huft *u[BMAX];/* table stack */
440  unsigned v[N_MAX]; /* values in order of bit length */
441  register int w; /* bits before this table == (l * h) */
442  unsigned x[BMAX + 1]; /* bit offsets, then code stack */
443  unsigned *xp; /* pointer into x */
444  int y; /* number of dummy codes added */
445  unsigned z; /* number of entries in current table */
446 
447  /* Generate counts for each bit length */
448  el = n > 256 ? b[256] : BMAX; /* set length of EOB code, if any */
449 #ifdef _KERNEL
450  memzero((char *) c, sizeof(c));
451 #else
452  for (i = 0; i < BMAX+1; i++)
453  c [i] = 0;
454 #endif
455  p = b;
456  i = n;
457  do {
458  c[*p]++;
459  p++; /* assume all entries <= BMAX */
460  } while (--i);
461  if (c[0] == n) { /* null input--all zero length codes */
462  *t = (struct huft *) NULL;
463  *m = 0;
464  return 0;
465  }
466  /* Find minimum and maximum length, bound *m by those */
467  for (j = 1; j <= BMAX; j++)
468  if (c[j])
469  break;
470  k = j; /* minimum code length */
471  if ((unsigned) *m < j)
472  *m = j;
473  for (i = BMAX; i; i--)
474  if (c[i])
475  break;
476  g = i; /* maximum code length */
477  if ((unsigned) *m > i)
478  *m = i;
479 
480  /* Adjust last length count to fill out codes, if needed */
481  for (y = 1 << j; j < i; j++, y <<= 1)
482  if ((y -= c[j]) < 0)
483  return 2; /* bad input: more codes than bits */
484  if ((y -= c[i]) < 0)
485  return 2;
486  c[i] += y;
487 
488  /* Generate starting offsets into the value table for each length */
489  x[1] = j = 0;
490  p = c + 1;
491  xp = x + 2;
492  while (--i) { /* note that i == g from above */
493  *xp++ = (j += *p++);
494  }
495 
496  /* Make a table of values in order of bit lengths */
497  p = b;
498  i = 0;
499  do {
500  if ((j = *p++) != 0)
501  v[x[j]++] = i;
502  } while (++i < n);
503 
504  /* Generate the Huffman codes and for each, make the table entries */
505  x[0] = i = 0; /* first Huffman code is zero */
506  p = v; /* grab values in bit order */
507  h = -1; /* no tables yet--level -1 */
508  w = l[-1] = 0; /* no bits decoded yet */
509  u[0] = (struct huft *) NULL; /* just to keep compilers happy */
510  q = (struct huft *) NULL; /* ditto */
511  z = 0; /* ditto */
512 
513  /* go through the bit lengths (k already is bits in shortest code) */
514  for (; k <= g; k++) {
515  a = c[k];
516  while (a--) {
517  /*
518  * here i is the Huffman code of length k bits for
519  * value *p
520  */
521  /* make tables up to required level */
522  while (k > w + l[h]) {
523  w += l[h++]; /* add bits already decoded */
524 
525  /*
526  * compute minimum size table less than or
527  * equal to *m bits
528  */
529  z = (z = g - w) > (unsigned) *m ? *m : z; /* upper limit */
530  if ((f = 1 << (j = k - w)) > a + 1) { /* try a k-w bit table:
531  * too few codes for k-w
532  * bit table */
533  f -= a + 1; /* deduct codes from
534  * patterns left */
535  xp = c + k;
536  while (++j < z) { /* try smaller tables up
537  * to z bits */
538  if ((f <<= 1) <= *++xp)
539  break; /* enough codes to use
540  * up j bits */
541  f -= *xp; /* else deduct codes
542  * from patterns */
543  }
544  }
545  if ((unsigned) w + j > el && (unsigned) w < el)
546  j = el - w; /* make EOB code end at
547  * table */
548  z = 1 << j; /* table entries for j-bit
549  * table */
550  l[h] = j; /* set table size in stack */
551 
552  /* allocate and link in new table */
553  if ((q = (struct huft *) malloc((z + 1) * sizeof(struct huft), M_GZIP, M_WAITOK)) ==
554  (struct huft *) NULL) {
555  if (h)
556  huft_free(glbl, u[0]);
557  return 3; /* not enough memory */
558  }
559  glbl->gz_hufts += z + 1; /* track memory usage */
560  *t = q + 1; /* link to list for
561  * huft_free() */
562  *(t = &(q->v.t)) = (struct huft *) NULL;
563  u[h] = ++q; /* table starts after link */
564 
565  /* connect to last table, if there is one */
566  if (h) {
567  x[h] = i; /* save pattern for
568  * backing up */
569  r.b = (uch) l[h - 1]; /* bits to dump before
570  * this table */
571  r.e = (uch) (16 + j); /* bits in this table */
572  r.v.t = q; /* pointer to this table */
573  j = (i & ((1 << w) - 1)) >> (w - l[h - 1]);
574  u[h - 1][j] = r; /* connect to last table */
575  }
576  }
577 
578  /* set up table entry in r */
579  r.b = (uch) (k - w);
580  if (p >= v + n)
581  r.e = 99; /* out of values--invalid
582  * code */
583  else if (*p < s) {
584  r.e = (uch) (*p < 256 ? 16 : 15); /* 256 is end-of-block
585  * code */
586  r.v.n = *p++; /* simple code is just the
587  * value */
588  } else {
589  r.e = (uch) e[*p - s]; /* non-simple--look up
590  * in lists */
591  r.v.n = d[*p++ - s];
592  }
593 
594  /* fill code-like entries with r */
595  f = 1 << (k - w);
596  for (j = i >> w; j < z; j += f)
597  q[j] = r;
598 
599  /* backwards increment the k-bit code i */
600  for (j = 1 << (k - 1); i & j; j >>= 1)
601  i ^= j;
602  i ^= j;
603 
604  /* backup over finished tables */
605  while ((i & ((1 << w) - 1)) != x[h])
606  w -= l[--h]; /* don't need to update q */
607  }
608  }
609 
610  /* return actual size of base table */
611  *m = l[0];
612 
613  /* Return true (1) if we were given an incomplete table */
614  return y != 0 && g != 1;
615 }
616 
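The front half of huft_build() is standard canonical-Huffman bookkeeping: count the codes of each bit length, turn the counts into starting offsets, and list the symbols in bit-length order; the codes themselves are then just consecutive integers within each length. A stripped-down, self-contained restatement of that assignment in RFC 1951 style (huft_build() never materializes codes this way itself, since it fills bit-reversed lookup tables instead):

/* Assign canonical codes given each symbol's code length (0 == unused).
 * codes[i] is meaningful only when lens[i] != 0. */
static void
assign_canonical_codes(const unsigned *lens, unsigned n, unsigned *codes)
{
	unsigned count[BMAX + 1] = { 0 };
	unsigned next[BMAX + 1];
	unsigned i, len, code = 0;

	for (i = 0; i < n; i++)
		count[lens[i]]++;               /* codes per bit length      */
	count[0] = 0;                           /* length 0 means "no code"  */
	for (len = 1; len <= BMAX; len++) {
		code = (code + count[len - 1]) << 1;
		next[len] = code;               /* first code of this length */
	}
	for (i = 0; i < n; i++)
		if (lens[i] != 0)
			codes[i] = next[lens[i]]++;  /* note #10: bump only when the length is non-zero */
}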
617 static int
618 huft_free(glbl, t)
619  struct inflate *glbl;
620  struct huft *t; /* table to free */
621 /* Free the malloc'ed tables built by huft_build(), which makes a linked
622  list of the tables it made, with the links in a dummy first entry of
623  each table. */
624 {
625  register struct huft *p, *q;
626 
627  /* Go through linked list, freeing from the malloced (t[-1]) address. */
628  p = t;
629  while (p != (struct huft *) NULL) {
630  q = (--p)->v.t;
631  free(p, M_GZIP);
632  p = q;
633  }
634  return 0;
635 }
636 
637 /* inflate (decompress) the codes in a deflated (compressed) block.
638  Return an error code or zero if it all goes ok. */
639 static int
640 inflate_codes(glbl, tl, td, bl, bd)
641  struct inflate *glbl;
642  struct huft *tl, *td;/* literal/length and distance decoder tables */
643  int bl, bd; /* number of bits decoded by tl[] and td[] */
644 {
645  register unsigned e; /* table entry flag/number of extra bits */
646  unsigned n, d; /* length and index for copy */
647  unsigned w; /* current window position */
648  struct huft *t; /* pointer to table entry */
649  unsigned ml, md; /* masks for bl and bd bits */
650  register ulg b; /* bit buffer */
651  register unsigned k; /* number of bits in bit buffer */
652 
653  /* make local copies of globals */
654  b = glbl->gz_bb; /* initialize bit buffer */
655  k = glbl->gz_bk;
656  w = glbl->gz_wp; /* initialize window position */
657 
658  /* inflate the coded data */
659  ml = mask[bl]; /* precompute masks for speed */
660  md = mask[bd];
661  while (1) { /* do until end of block */
662  NEEDBITS(glbl, (unsigned) bl)
663  if ((e = (t = tl + ((unsigned) b & ml))->e) > 16)
664  do {
665  if (e == 99)
666  return 1;
667  DUMPBITS(t->b)
668  e -= 16;
669  NEEDBITS(glbl, e)
670  } while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16);
671  DUMPBITS(t->b)
672  if (e == 16) { /* then it's a literal */
673  glbl->gz_slide[w++] = (uch) t->v.n;
674  if (w == GZ_WSIZE) {
675  FLUSH(glbl, w);
676  w = 0;
677  }
678  } else { /* it's an EOB or a length */
679  /* exit if end of block */
680  if (e == 15)
681  break;
682 
683  /* get length of block to copy */
684  NEEDBITS(glbl, e)
685  n = t->v.n + ((unsigned) b & mask[e]);
686  DUMPBITS(e);
687 
688  /* decode distance of block to copy */
689  NEEDBITS(glbl, (unsigned) bd)
690  if ((e = (t = td + ((unsigned) b & md))->e) > 16)
691  do {
692  if (e == 99)
693  return 1;
694  DUMPBITS(t->b)
695  e -= 16;
696  NEEDBITS(glbl, e)
697  } while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16);
698  DUMPBITS(t->b)
699  NEEDBITS(glbl, e)
700  d = w - t->v.n - ((unsigned) b & mask[e]);
701  DUMPBITS(e)
702  /* do the copy */
703  do {
704  n -= (e = (e = GZ_WSIZE - ((d &= GZ_WSIZE - 1) > w ? d : w)) > n ? n : e);
705 #ifndef NOMEMCPY
706  if (w - d >= e) { /* (this test assumes
707  * unsigned comparison) */
708  memcpy(glbl->gz_slide + w, glbl->gz_slide + d, e);
709  w += e;
710  d += e;
711  } else /* do it slow to avoid memcpy()
712  * overlap */
713 #endif /* !NOMEMCPY */
714  do {
715  glbl->gz_slide[w++] = glbl->gz_slide[d++];
716  } while (--e);
717  if (w == GZ_WSIZE) {
718  FLUSH(glbl, w);
719  w = 0;
720  }
721  } while (n);
722  }
723  }
724 
725  /* restore the globals from the locals */
726  glbl->gz_wp = w; /* restore global window pointer */
727  glbl->gz_bb = b; /* restore global bit buffer */
728  glbl->gz_bk = k;
729 
730  /* done */
731  return 0;
732 }
733 
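One subtlety in the copy loop above: a match may overlap its own output (distance 1 with a long length simply replicates the previous byte), which is why the byte-by-byte loop is kept even when memcpy() is available. A tiny standalone illustration:

#include <stdio.h>

int
main(void)
{
	unsigned char win[16] = "abc";  /* window already holds "abc"            */
	unsigned w = 3;                 /* current write position                */
	unsigned d = 2, n = 5;          /* copy 5 bytes from distance 1 back     */

	while (n--)                     /* overlap-safe, like the loop above     */
		win[w++] = win[d++];
	printf("%.*s\n", (int)w, (const char *)win); /* prints "abcccccc"        */
	return 0;
}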
734 /* "decompress" an inflated type 0 (stored) block. */
735 static int
736 inflate_stored(glbl)
737  struct inflate *glbl;
738 {
739  unsigned n; /* number of bytes in block */
740  unsigned w; /* current window position */
741  register ulg b; /* bit buffer */
742  register unsigned k; /* number of bits in bit buffer */
743 
744  /* make local copies of globals */
745  b = glbl->gz_bb; /* initialize bit buffer */
746  k = glbl->gz_bk;
747  w = glbl->gz_wp; /* initialize window position */
748 
749  /* go to byte boundary */
750  n = k & 7;
751  DUMPBITS(n);
752 
753  /* get the length and its complement */
754  NEEDBITS(glbl, 16)
755  n = ((unsigned) b & 0xffff);
756  DUMPBITS(16)
757  NEEDBITS(glbl, 16)
758  if (n != (unsigned) ((~b) & 0xffff))
759  return 1; /* error in compressed data */
760  DUMPBITS(16)
761  /* read and output the compressed data */
762  while (n--) {
763  NEEDBITS(glbl, 8)
764  glbl->gz_slide[w++] = (uch) b;
765  if (w == GZ_WSIZE) {
766  FLUSH(glbl, w);
767  w = 0;
768  }
769  DUMPBITS(8)
770  }
771 
772  /* restore the globals from the locals */
773  glbl->gz_wp = w; /* restore global window pointer */
774  glbl->gz_bb = b; /* restore global bit buffer */
775  glbl->gz_bk = k;
776  return 0;
777 }
778 
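The length/one's-complement pair read above is the only integrity check a stored block carries; in isolation the test is just this (a sketch, with the check wrapped in a tiny helper):

/* Returns 1 if a stored block's LEN/NLEN fields are consistent. */
static int
stored_len_ok(unsigned len, unsigned nlen)
{
	return len == ((~nlen) & 0xffff);   /* e.g. stored_len_ok(0x0005, 0xfffa) == 1 */
}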
779 /* decompress an inflated type 1 (fixed Huffman codes) block. We should
780  either replace this with a custom decoder, or at least precompute the
781  Huffman tables. */
782 static int
783 inflate_fixed(glbl)
784  struct inflate *glbl;
785 {
786  /* if first time, set up tables for fixed blocks */
787  if (glbl->gz_fixed_tl == (struct huft *) NULL) {
788  int i; /* temporary variable */
789  static unsigned l[288]; /* length list for huft_build */
790 
791  /* literal table */
792  for (i = 0; i < 144; i++)
793  l[i] = 8;
794  for (; i < 256; i++)
795  l[i] = 9;
796  for (; i < 280; i++)
797  l[i] = 7;
798  for (; i < 288; i++) /* make a complete, but wrong code
799  * set */
800  l[i] = 8;
801  glbl->gz_fixed_bl = 7;
802  if ((i = huft_build(glbl, l, 288, 257, cplens, cplext,
803  &glbl->gz_fixed_tl, &glbl->gz_fixed_bl)) != 0) {
804  glbl->gz_fixed_tl = (struct huft *) NULL;
805  return i;
806  }
807  /* distance table */
808  for (i = 0; i < 30; i++) /* make an incomplete code
809  * set */
810  l[i] = 5;
811  glbl->gz_fixed_bd = 5;
812  if ((i = huft_build(glbl, l, 30, 0, cpdist, cpdext,
813  &glbl->gz_fixed_td, &glbl->gz_fixed_bd)) > 1) {
814  huft_free(glbl, glbl->gz_fixed_tl);
815  glbl->gz_fixed_tl = (struct huft *) NULL;
816  return i;
817  }
818  }
819  /* decompress until an end-of-block code */
820  return inflate_codes(glbl, glbl->gz_fixed_tl, glbl->gz_fixed_td, glbl->gz_fixed_bl, glbl->gz_fixed_bd) != 0;
821 }
822 
823 /* decompress an inflated type 2 (dynamic Huffman codes) block. */
824 static int
825 inflate_dynamic(glbl)
826  struct inflate *glbl;
827 {
828  int i; /* temporary variables */
829  unsigned j;
830  unsigned l; /* last length */
831  unsigned m; /* mask for bit lengths table */
832  unsigned n; /* number of lengths to get */
833  struct huft *tl; /* literal/length code table */
834  struct huft *td; /* distance code table */
835  int bl; /* lookup bits for tl */
836  int bd; /* lookup bits for td */
837  unsigned nb; /* number of bit length codes */
838  unsigned nl; /* number of literal/length codes */
839  unsigned nd; /* number of distance codes */
840 #ifdef PKZIP_BUG_WORKAROUND
841  unsigned ll[288 + 32]; /* literal/length and distance code
842  * lengths */
843 #else
844  unsigned ll[286 + 30]; /* literal/length and distance code
845  * lengths */
846 #endif
847  register ulg b; /* bit buffer */
848  register unsigned k; /* number of bits in bit buffer */
849 
850  /* make local bit buffer */
851  b = glbl->gz_bb;
852  k = glbl->gz_bk;
853 
854  /* read in table lengths */
855  NEEDBITS(glbl, 5)
856  nl = 257 + ((unsigned) b & 0x1f); /* number of
857  * literal/length codes */
858  DUMPBITS(5)
859  NEEDBITS(glbl, 5)
860  nd = 1 + ((unsigned) b & 0x1f); /* number of distance codes */
861  DUMPBITS(5)
862  NEEDBITS(glbl, 4)
863  nb = 4 + ((unsigned) b & 0xf); /* number of bit length codes */
864  DUMPBITS(4)
865 #ifdef PKZIP_BUG_WORKAROUND
866  if (nl > 288 || nd > 32)
867 #else
868  if (nl > 286 || nd > 30)
869 #endif
870  return 1; /* bad lengths */
871  /* read in bit-length-code lengths */
872  for (j = 0; j < nb; j++) {
873  NEEDBITS(glbl, 3)
874  ll[border[j]] = (unsigned) b & 7;
875  DUMPBITS(3)
876  }
877  for (; j < 19; j++)
878  ll[border[j]] = 0;
879 
880  /* build decoding table for trees--single level, 7 bit lookup */
881  bl = 7;
882  if ((i = huft_build(glbl, ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) {
883  if (i == 1)
884  huft_free(glbl, tl);
885  return i; /* incomplete code set */
886  }
887  /* read in literal and distance code lengths */
888  n = nl + nd;
889  m = mask[bl];
890  i = l = 0;
891  while ((unsigned) i < n) {
892  NEEDBITS(glbl, (unsigned) bl)
893  j = (td = tl + ((unsigned) b & m))->b;
894  DUMPBITS(j)
895  j = td->v.n;
896  if (j < 16) /* length of code in bits (0..15) */
897  ll[i++] = l = j; /* save last length in l */
898  else if (j == 16) { /* repeat last length 3 to 6 times */
899  NEEDBITS(glbl, 2)
900  j = 3 + ((unsigned) b & 3);
901  DUMPBITS(2)
902  if ((unsigned) i + j > n)
903  return 1;
904  while (j--)
905  ll[i++] = l;
906  } else if (j == 17) { /* 3 to 10 zero length codes */
907  NEEDBITS(glbl, 3)
908  j = 3 + ((unsigned) b & 7);
909  DUMPBITS(3)
910  if ((unsigned) i + j > n)
911  return 1;
912  while (j--)
913  ll[i++] = 0;
914  l = 0;
915  } else { /* j == 18: 11 to 138 zero length codes */
916  NEEDBITS(glbl, 7)
917  j = 11 + ((unsigned) b & 0x7f);
918  DUMPBITS(7)
919  if ((unsigned) i + j > n)
920  return 1;
921  while (j--)
922  ll[i++] = 0;
923  l = 0;
924  }
925  }
926 
927  /* free decoding table for trees */
928  huft_free(glbl, tl);
929 
930  /* restore the global bit buffer */
931  glbl->gz_bb = b;
932  glbl->gz_bk = k;
933 
934  /* build the decoding tables for literal/length and distance codes */
935  bl = lbits;
936  i = huft_build(glbl, ll, nl, 257, cplens, cplext, &tl, &bl);
937  if (i != 0) {
938  if (i == 1 && !qflag) {
939  FPRINTF("(incomplete l-tree) ");
940  huft_free(glbl, tl);
941  }
942  return i; /* incomplete code set */
943  }
944  bd = dbits;
945  i = huft_build(glbl, ll + nl, nd, 0, cpdist, cpdext, &td, &bd);
946  if (i != 0) {
947  if (i == 1 && !qflag) {
948  FPRINTF("(incomplete d-tree) ");
949 #ifdef PKZIP_BUG_WORKAROUND
950  i = 0;
951  }
952 #else
953  huft_free(glbl, td);
954  }
955  huft_free(glbl, tl);
956  return i; /* incomplete code set */
957 #endif
958  }
959  /* decompress until an end-of-block code */
960  if (inflate_codes(glbl, tl, td, bl, bd))
961  return 1;
962 
963  /* free the decoding tables, return */
964  huft_free(glbl, tl);
965  huft_free(glbl, td);
966  return 0;
967 }
968 
969 /* decompress an inflated block */
970 static int
971 inflate_block(glbl, e)
972  struct inflate *glbl;
973  int *e; /* last block flag */
974 {
975  unsigned t; /* block type */
976  register ulg b; /* bit buffer */
977  register unsigned k; /* number of bits in bit buffer */
978 
979  /* make local bit buffer */
980  b = glbl->gz_bb;
981  k = glbl->gz_bk;
982 
983  /* read in last block bit */
984  NEEDBITS(glbl, 1)
985  *e = (int) b & 1;
986  DUMPBITS(1)
987  /* read in block type */
988  NEEDBITS(glbl, 2)
989  t = (unsigned) b & 3;
990  DUMPBITS(2)
991  /* restore the global bit buffer */
992  glbl->gz_bb = b;
993  glbl->gz_bk = k;
994 
995  /* inflate that block type */
996  if (t == 2)
997  return inflate_dynamic(glbl);
998  if (t == 0)
999  return inflate_stored(glbl);
1000  if (t == 1)
1001  return inflate_fixed(glbl);
1002  /* bad block type */
1003  return 2;
1004 }
1005 
1006 
1007 
1008 /* decompress an inflated entry */
1009 static int
1010 xinflate(glbl)
1011  struct inflate *glbl;
1012 {
1013  int e; /* last block flag */
1014  int r; /* result code */
1015  unsigned h; /* maximum struct huft's malloc'ed */
1016 
1017  glbl->gz_fixed_tl = (struct huft *) NULL;
1018 
1019  /* initialize window, bit buffer */
1020  glbl->gz_wp = 0;
1021  glbl->gz_bk = 0;
1022  glbl->gz_bb = 0;
1023 
1024  /* decompress until the last block */
1025  h = 0;
1026  do {
1027  glbl->gz_hufts = 0;
1028  if ((r = inflate_block(glbl, &e)) != 0)
1029  return r;
1030  if (glbl->gz_hufts > h)
1031  h = glbl->gz_hufts;
1032  } while (!e);
1033 
1034  /* flush out slide */
1035  FLUSH(glbl, glbl->gz_wp);
1036 
1037  /* return success */
1038  return 0;
1039 }
1040 
1041 /* Nobody uses this - why not? */
1042 int
1043 inflate(glbl)
1044  struct inflate *glbl;
1045 {
1046  int i;
1047 #ifdef _KERNEL
1048  u_char *p = NULL;
1049 
1050  if (!glbl->gz_slide)
1051  p = glbl->gz_slide = malloc(GZ_WSIZE, M_GZIP, M_WAITOK);
1052 #endif
1053  if (!glbl->gz_slide)
1054 #ifdef _KERNEL
1055  return(ENOMEM);
1056 #else
1057  return 3; /* kzip expects 3 */
1058 #endif
1059  i = xinflate(glbl);
1060 
1061  if (glbl->gz_fixed_td != (struct huft *) NULL) {
1062  huft_free(glbl, glbl->gz_fixed_td);
1063  glbl->gz_fixed_td = (struct huft *) NULL;
1064  }
1065  if (glbl->gz_fixed_tl != (struct huft *) NULL) {
1066  huft_free(glbl, glbl->gz_fixed_tl);
1067  glbl->gz_fixed_tl = (struct huft *) NULL;
1068  }
1069 #ifdef _KERNEL
1070  if (p == glbl->gz_slide) {
1071  free(glbl->gz_slide, M_GZIP);
1072  glbl->gz_slide = NULL;
1073  }
1074 #endif
1075  return i;
1076 }
1077 /* ----------------------- END INFLATE.C */
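For completeness, this is roughly what a caller of the public entry point looks like. It is a hedged sketch: struct inflate and GZ_EOF come from <sys/inflate.h>, only the fields this file itself touches are shown, the callback signatures are inferred from how gz_input and gz_output are invoked above, and struct mem_stream with its next_byte()/put_bytes() callbacks is invented for the example.

struct mem_stream {                     /* made-up caller state              */
	const u_char *buf;
	size_t len, pos;
};

static int
next_byte(void *arg)                    /* gz_input: one byte, or GZ_EOF     */
{
	struct mem_stream *s = arg;

	return (s->pos < s->len) ? s->buf[s->pos++] : GZ_EOF;
}

static int
put_bytes(void *arg, u_char *buf, u_long len)   /* gz_output: consume output */
{
	/* copy len decompressed bytes out of buf; a non-zero return aborts */
	return 0;
}

static int
decompress(struct mem_stream *s)
{
	struct inflate infl;

	bzero(&infl, sizeof(infl));
	infl.gz_private = s;
	infl.gz_input   = next_byte;
	infl.gz_output  = put_bytes;
	/* gz_slide left NULL: the kernel build of inflate() allocates the
	 * 32K window itself and frees it before returning. */
	return inflate(&infl);
}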