[linux-2.6.6] drivers/char/ftape/compressor/zftape-compress.c
1 /*
2  *      Copyright (C) 1994-1997 Claus-Justus Heine
3
4  This program is free software; you can redistribute it and/or
5  modify it under the terms of the GNU General Public License as
6  published by the Free Software Foundation; either version 2, or (at
7  your option) any later version.
8  
9  This program is distributed in the hope that it will be useful, but
10  WITHOUT ANY WARRANTY; without even the implied warranty of
11  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  General Public License for more details.
13  
14  You should have received a copy of the GNU General Public License
15  along with this program; see the file COPYING.  If not, write to
16  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17  USA.
18  
19  *
20  *     This file implements a "generic" interface between the
21  *     zftape-driver and a compression-algorithm. The
22  *     compression-algorithm currently used is an LZ77. I use the
23  *     implementation lzrw3 by Ross N. Williams (Renaissance
24  *     Software). The compression program itself is in the files
25  *     lzrw3.c and lzrw3.h.  To adopt another compression algorithm,
26  *     the functions zft_compress() and zft_uncompress() must be
27  *     changed appropriately. See below.
28  */
29
30  char zftc_src[] ="$Source: /homes/cvs/ftape-stacked/ftape/compressor/zftape-compress.c,v $";
31  char zftc_rev[] = "$Revision: 1.1.6.1 $";
32  char zftc_dat[] = "$Date: 1997/11/16 15:15:56 $";
33
34 #include <linux/version.h>
35 #include <linux/errno.h>
36 #include <linux/mm.h>
37 #include <linux/module.h>
38
39 #include <linux/zftape.h>
40
41 #include <asm/uaccess.h>
42
43 #include "../zftape/zftape-init.h"
44 #include "../zftape/zftape-eof.h"
45 #include "../zftape/zftape-ctl.h"
46 #include "../zftape/zftape-write.h"
47 #include "../zftape/zftape-read.h"
48 #include "../zftape/zftape-rw.h"
49 #include "../compressor/zftape-compress.h"
50 #include "../zftape/zftape-vtbl.h"
51 #include "../compressor/lzrw3.h"
52
53 /*
54  *   global variables
55  */
56
57 /* I handle the allocation of this buffer as a special case, because
58  * its size varies depending on the length of the inserted tape.
59  */
60
61 /* local variables 
62  */
63 static void *zftc_wrk_mem = NULL;
64 static __u8 *zftc_buf     = NULL;
65 static void *zftc_scratch_buf  = NULL;
66
67 /* compression statistics 
68  */
69 static unsigned int zftc_wr_uncompressed = 0;
70 static unsigned int zftc_wr_compressed   = 0;
71 static unsigned int zftc_rd_uncompressed = 0;
72 static unsigned int zftc_rd_compressed   = 0;
73
74 /* forward */
75 static int  zftc_write(int *write_cnt,
76                        __u8 *dst_buf, const int seg_sz,
77                        const __u8 *src_buf, const int req_len,
78                        const zft_position *pos, const zft_volinfo *volume);
79 static int  zftc_read(int *read_cnt,
80                       __u8  *dst_buf, const int to_do,
81                       const __u8 *src_buf, const int seg_sz,
82                       const zft_position *pos, const zft_volinfo *volume);
83 static int  zftc_seek(unsigned int new_block_pos, 
84                       zft_position *pos, const zft_volinfo *volume,
85                       __u8 *buffer);
86 static void zftc_lock   (void);
87 static void zftc_reset  (void);
88 static void zftc_cleanup(void);
89 static void zftc_stats      (void);
90
91 /* compressed segment. This conforms to QIC-80-MC, Revision K.
92  * 
93  * Rev. K applies to tapes with `fixed length format' which is
94  * indicated by format codes 2, 3 and 5. See below for format codes 4 and 6.
95  *
96  * 2 bytes: offset of compression segment structure
97  *          29k > offset >= 29k-18: data from previous segment ends in this
98  *                                  segment and no compressed block starts
99  *                                  in this segment
100  *                     offset == 0: data from previous segment occupies entire
101  *                                  segment and continues in next segment
102  * n bytes: remainder from previous segment
103  * 
104  * Rev. K:  
105  * 4 bytes: file set byte offset
106  * Post Rev. K and QIC-3020/3020:
107  * 8 bytes: file set byte offset
108  * 2 bytes: byte count N (amount of data following)
109  *          bit 15 is set if data is compressed, bit 15 is not
110  *          set if data is uncompressed
111  * N bytes: data (as much as specified in the byte count)
112  * 2 bytes: byte count N_1 of next cluster
113  * N_1 bytes: data of next cluster
114  * 2 bytes: byte count N_2 of next cluster
115  * N_2 bytes: ...  
116  *
117  * Note that the `N' byte count accounts only for the bytes that are in
118  * the current segment if the cluster spans to the next segment.
119  */
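/* A minimal illustrative sketch (not compiled in; the values and the helper
 * name are made up) of how the layout above maps onto the GET2()/GET4()/GET8()
 * accessors used throughout this file.  get_cseg() below is the real parser.
 */
#if 0
static void example_parse_cseg_header(const __u8 *buff,
                                       const unsigned int seg_sz,
                                       const int qic113)
{
        unsigned int offset = GET2(buff, 0); /* 0: cluster fills the segment */
        __s64 foffs = 0;

        if (offset != 0 && seg_sz - offset > 18) {
                if (qic113)                  /* post Rev. K: 8 byte offset  */
                        foffs = GET8(buff, offset);
                else                         /* Rev. K: 4 byte offset       */
                        foffs = (__s64)GET4(buff, offset);
        }
        /* what follows is a __u16 byte count N; bit 15 set means the
         * N data bytes are stored uncompressed
         */
}
#endif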
120
121 typedef struct
122 {
123         int cmpr_pos;             /* current position in compression buffer */
124         int cmpr_sz;              /* what is left in the compression buffer
125                                    * when copying the compressed data to the
126                                    * deblock buffer
127                                    */
128         unsigned int first_block; /* location of header information in
129                                    * this segment
130                                    */
131         unsigned int count;       /* amount of data of current block
132                                    * contained in current segment 
133                                    */
134         unsigned int offset;      /* offset in current segment */
135         unsigned int spans:1;     /* might continue in next segment */
136         unsigned int uncmpr;      /* 0x8000 if this block contains
137                                    * uncompressed data 
138                                    */
139         __s64 foffs;              /* file set byte offset, same as in 
140                                    * compression map segment
141                                    */
142 } cmpr_info;
143
144 static cmpr_info cseg; /* static data. Must be kept up to date and shared by 
145                         * read, write and seek functions
146                         */
147
148 #define DUMP_CMPR_INFO(level, msg, info)                                \
149         TRACE(level, msg "\n"                                           \
150               KERN_INFO "cmpr_pos   : %d\n"                             \
151               KERN_INFO "cmpr_sz    : %d\n"                             \
152               KERN_INFO "first_block: %d\n"                             \
153               KERN_INFO "count      : %d\n"                             \
154               KERN_INFO "offset     : %d\n"                             \
155               KERN_INFO "spans      : %d\n"                             \
156               KERN_INFO "uncmpr     : 0x%04x\n"                         \
157               KERN_INFO "foffs      : " LL_X,                           \
158               (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block,   \
159               (info)->count, (info)->offset, (info)->spans == 1,        \
160               (info)->uncmpr, LL((info)->foffs))
161
162 /*   dispatch compression segment info, return error code
163  *  
164  *   afterwards, cseg->offset points to start of data of the NEXT
165  *   compressed block, and cseg->count contains the amount of data
166  *   left in the current compressed block. cseg->spans is set to 1 if
167  *   the block is continued in the following segment. Otherwise it is
168  *   set to 0. 
169  */
170 static int get_cseg (cmpr_info *cinfo, const __u8 *buff, 
171                      const unsigned int seg_sz,
172                      const zft_volinfo *volume)
173 {
174         TRACE_FUN(ft_t_flow);
175
176         cinfo->first_block = GET2(buff, 0);
177         if (cinfo->first_block == 0) { /* data spans to next segment */
178                 cinfo->count  = seg_sz - sizeof(__u16);
179                 cinfo->offset = seg_sz;
180                 cinfo->spans = 1;
181         } else { /* cluster definitely ends in this segment */
182                 if (cinfo->first_block > seg_sz) {
183                         /* data corrupted */
184                         TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
185                                     KERN_INFO "segment size: %d\n"
186                                     KERN_INFO "first block : %d",
187                                     seg_sz, cinfo->first_block);
188                 }
189                 cinfo->count  = cinfo->first_block - sizeof(__u16);
190                 cinfo->offset = cinfo->first_block;
191                 cinfo->spans = 0;
192         }
193         /* now get the offset the first block should have in the
194          * uncompressed data stream.
195          *
196          * For this magic `18' refer to CRF-3 standard or QIC-80MC,
197          * Rev. K.  
198          */
199         if ((seg_sz - cinfo->offset) > 18) {
200                 if (volume->qic113) { /* > revision K */
201                         TRACE(ft_t_data_flow, "New QIC-113 compliance");
202                         cinfo->foffs = GET8(buff, cinfo->offset);
203                         cinfo->offset += sizeof(__s64); 
204                 } else {
205                         TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
206                         cinfo->foffs   = (__s64)GET4(buff, cinfo->offset);
207                         cinfo->offset += sizeof(__u32); 
208                 }
209         }
210         if (cinfo->foffs > volume->size) {
211                 TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
212                             KERN_INFO "offset in current volume: %d\n"
213                             KERN_INFO "size of current volume  : %d",
214                             (int)(cinfo->foffs>>10), (int)(volume->size>>10));
215         }
216         if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
217                 TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
218                             KERN_INFO "block size : %d\n"
219                             KERN_INFO "data record: %d",
220                             volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
221         }
222         DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
223         TRACE_EXIT 0;
224 }
225
226 /*  This one is called when a new cluster starts in the same segment.
227  *  
228  *  Note: if this is the first cluster in the current segment, we must
229  *  not check whether there are more than 18 bytes available because
230  *  this has already been done in get_cseg() and there may be less
231  *  than 18 bytes available due to header information.
232  * 
233  */
234 static void get_next_cluster(cmpr_info *cluster, const __u8 *buff, 
235                              const int seg_sz, const int finish)
236 {
237         TRACE_FUN(ft_t_flow);
238
239         if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
240                 cluster->count   = GET2(buff, cluster->offset);
241                 cluster->uncmpr  = cluster->count & 0x8000;
242                 cluster->count  -= cluster->uncmpr;
243                 cluster->offset += sizeof(__u16);
244                 cluster->foffs   = 0;
245                 if ((cluster->offset + cluster->count) < seg_sz) {
246                         cluster->spans = 0;
247                 } else if (cluster->offset + cluster->count == seg_sz) {
248                         cluster->spans = !finish;
249                 } else {
250                         /* either an error or a volume written by an 
251                          * old version. If this is a data error, then we'll
252                          * catch it later.
253                          */
254                         TRACE(ft_t_data_flow, "Either error or old volume");
255                         cluster->spans = 1;
256                         cluster->count = seg_sz - cluster->offset;
257                 }
258         } else {
259                 cluster->count = 0;
260                 cluster->spans = 0;
261                 cluster->foffs = 0;
262         }
263         DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
264         TRACE_EXIT;
265 }
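/* Worked example (made-up numbers) for the clipping branch in
 * get_next_cluster() above: with seg_sz = 29696, a count word read at
 * offset 28998 announcing count = 800 gives offset = 29000 after the
 * count word, and offset + count = 29800 > seg_sz, so the cluster is
 * clipped to the 696 bytes left in this segment and spans is set to 1;
 * the rest is picked up via the next segment's header.
 */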
266
267 static void zftc_lock(void)
268 {
269 }
270
271 /*  this function is needed for zftape_reset_position in zftape-io.c 
272  */
273 static void zftc_reset(void)
274 {
275         TRACE_FUN(ft_t_flow);
276
277         memset((void *)&cseg, '\0', sizeof(cseg));
278         zftc_stats();
279         TRACE_EXIT;
280 }
281
282 static int cmpr_mem_initialized = 0;
283 static unsigned int alloc_blksz = 0;
284
285 static int zft_allocate_cmpr_mem(unsigned int blksz)
286 {
287         TRACE_FUN(ft_t_flow);
288
289         if (cmpr_mem_initialized && blksz == alloc_blksz) {
290                 TRACE_EXIT 0;
291         }
292         TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
293                     zftc_cleanup());
294         TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
295                     zftc_cleanup());
296         alloc_blksz = blksz;
297         TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
298                     zftc_cleanup());
299         cmpr_mem_initialized = 1;
300         TRACE_EXIT 0;
301 }
302
303 static void zftc_cleanup(void)
304 {
305         TRACE_FUN(ft_t_flow);
306
307         zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
308         zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
309         zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
310         cmpr_mem_initialized = alloc_blksz = 0;
311         TRACE_EXIT;
312 }
313
314 /*****************************************************************************
315  *                                                                           *
316  *  The following two functions "ftape_compress()" and                       *
317  *  "ftape_uncompress()" are the interface to the actual compression         *
318  *  algorithm (i.e. they are calling the "compress()" function from          *
319  *  the lzrw3 package for now). These routines could quite easily be         *
320  *  changed to adopt another compression algorithm instead of lzrw3,         *
321  *  which currently is used.                                                 *
322  *                                                                           *
323  *****************************************************************************/
324
325 /* called by zft_compress_write() to perform the compression. Must
326  * return the size of the compressed data.
327  *
328  * NOTE: The size of the compressed data should not exceed the size of
329  *       the uncompressed data. Most compression algorithms have means
330  *       to store data unchanged if the "compressed" data amount would
331  *       exceed the original one. Mostly this is done by storing some
332  *       flag-bytes in front of the compressed data to indicate if it
333  *       is compressed or not. Thus the worst compression result
334  *       length is the original length plus those flag-bytes.
335  *
336  *       We don't want that, as the QIC-80 standard provides a means
337  *       of marking uncompressed blocks by simply setting bit 15 of
338  *       the compressed block's length. Thus a compressed block can
339  *       have at most a length of 2^15-1 bytes. The QIC-80 standard
340  *       restricts the block-length even further, allowing only 29k -
341  *       6 bytes.
342  *
343  *       Currently, the maximum blocksize used by zftape is 28k.
344  *
345  *       In short: don't exceed the length of the input-package, set
346  *       bit 15 of the compressed size to 1 if you have copied data
347  *       instead of compressing it.
348  */
349 static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
350 {
351         __s32 compressed_sz;
352         TRACE_FUN(ft_t_flow);
353         
354
355         lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
356                        in_buffer, in_sz, out_buffer, &compressed_sz);
357         if (TRACE_LEVEL >= ft_t_info) {
358                 /*  the compiler will optimize this away when
359                  *  compiled with NO_TRACE_AT_ALL option
360                  */
361                 TRACE(ft_t_data_flow, "\n"
362                       KERN_INFO "before compression: %d bytes\n"
363                       KERN_INFO "after compression : %d bytes", 
364                       in_sz, 
365                       (int)(compressed_sz < 0 
366                       ? -compressed_sz : compressed_sz));
367                 /*  for statistical purposes
368                  */
369                 zftc_wr_compressed   += (compressed_sz < 0 
370                                            ? -compressed_sz : compressed_sz);
371                 zftc_wr_uncompressed += in_sz;
372         }
373         TRACE_EXIT (int)compressed_sz;
374 }
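/* A minimal sketch (placeholder identifiers, not driver code) of the
 * return-value convention described above, applied the same way in
 * zftc_write() below: a negative value means lzrw3 fell back to storing
 * the block uncompressed.
 */
#if 0
        compressed_sz = zft_compress(scratch_buf, blk_sz, cmpr_buf);
        if (compressed_sz < 0) {
                uncmpr_flag   = 0x8000;   /* bit 15 marks an uncompressed block */
                compressed_sz = -compressed_sz;
        } else {
                uncmpr_flag   = 0;
        }
        PUT2(segment_buf, write_pos, uncmpr_flag | compressed_sz);
#endif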
375
376 /* called by zft_compress_read() to decompress the data. Must
377  * return the size of the decompressed data for sanity checks
378  * (compared with zft_blk_sz)
379  *
380  * NOTE: Read the note for zft_compress() above!  If bit 15 of the
381  *       parameter in_sz is set, then the data in in_buffer isn't
382  *       compressed, which must be handled by the un-compression
383  *       algorithm. (I changed lzrw3 to handle this.)
384  *
385  *  The parameter max_out_sz is needed to prevent buffer overruns when 
386  *  uncompressing corrupt data.
387  */
388 static unsigned int zft_uncompress(__u8 *in_buffer, 
389                                    int in_sz, 
390                                    __u8 *out_buffer,
391                                    unsigned int max_out_sz)
392 {
393         TRACE_FUN(ft_t_flow);
394         
395         lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
396                        in_buffer, (__s32)in_sz,
397                        out_buffer, (__u32 *)&max_out_sz);
398         
399         if (TRACE_LEVEL >= ft_t_info) {
400                 TRACE(ft_t_data_flow, "\n"
401                       KERN_INFO "before decompression: %d bytes\n"
402                       KERN_INFO "after decompression : %d bytes", 
403                       in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz);
404                 /*  for statistical purposes
405                  */
406                 zftc_rd_compressed   += in_sz < 0 ? -in_sz : in_sz;
407                 zftc_rd_uncompressed += max_out_sz;
408         }
409         TRACE_EXIT (unsigned int)max_out_sz;
410 }
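/* Note on the calling convention above, as used by zftc_read() below: an
 * uncompressed cluster is handed in with a negative length
 * (zft_uncompress(zftc_buf, -cseg.cmpr_pos, ...)), which the modified
 * lzrw3 presumably copies through without decompressing.
 */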
411
412 /* print some statistics about the efficiency of the compression to
413  * the kernel log 
414  */
415 static void zftc_stats(void)
416 {
417         TRACE_FUN(ft_t_flow);
418
419         if (TRACE_LEVEL < ft_t_info) {
420                 TRACE_EXIT;
421         }
422         if (zftc_wr_uncompressed != 0) {
423                 if (zftc_wr_compressed > (1<<14)) {
424                         TRACE(ft_t_info, "compression statistics (writing):\n"
425                               KERN_INFO " compr./uncmpr.   : %3d %%",
426                               (((zftc_wr_compressed>>10) * 100)
427                                / (zftc_wr_uncompressed>>10)));
428                 } else {
429                         TRACE(ft_t_info, "compression statistics (writing):\n"
430                               KERN_INFO " compr./uncmpr.   : %3d %%",
431                               ((zftc_wr_compressed * 100)
432                                / zftc_wr_uncompressed));
433                 }
434         }
435         if (zftc_rd_uncompressed != 0) {
436                 if (zftc_rd_compressed > (1<<14)) {
437                         TRACE(ft_t_info, "compression statistics (reading):\n"
438                               KERN_INFO " compr./uncmpr.   : %3d %%",
439                               (((zftc_rd_compressed>>10) * 100)
440                                / (zftc_rd_uncompressed>>10)));
441                 } else {
442                         TRACE(ft_t_info, "compression statistics (reading):\n"
443                               KERN_INFO " compr./uncmpr.   : %3d %%",
444                               ((zftc_rd_compressed * 100)
445                                / zftc_rd_uncompressed));
446                 }
447         }
448         /* only print it once: */
449         zftc_wr_uncompressed = 
450                 zftc_wr_compressed  =
451                 zftc_rd_uncompressed =
452                 zftc_rd_compressed   = 0;
453         TRACE_EXIT;
454 }
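/* Worked example of the percentage arithmetic above (made-up numbers):
 * writing 150 MiB that compresses to 100 MiB takes the ">> 10" branch,
 * giving ((102400 * 100) / 153600) = 66 %; the shift keeps the
 * intermediate product comfortably inside 32 bits.
 */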
455
456 /* start new compressed block 
457  */
458 static int start_new_cseg(cmpr_info *cluster, 
459                           char *dst_buf, 
460                           const zft_position *pos,
461                           const unsigned int blk_sz,
462                           const char *src_buf,
463                           const int this_segs_sz,
464                           const int qic113)
465 {
466         int size_left;
467         int cp_cnt;
468         int buf_pos;
469         TRACE_FUN(ft_t_flow);
470
471         size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
472         TRACE(ft_t_data_flow,"\n" 
473               KERN_INFO "segment size   : %d\n"
474               KERN_INFO "compressed_sz: %d\n"
475               KERN_INFO "size_left      : %d",
476               this_segs_sz, cluster->cmpr_sz, size_left);
477         if (size_left > 18) { /* start a new cluster */
478                 cp_cnt = cluster->cmpr_sz;
479                 cluster->cmpr_sz = 0;
480                 buf_pos = cp_cnt + sizeof(__u16);
481                 PUT2(dst_buf, 0, buf_pos);
482
483                 if (qic113) {
484                         __s64 foffs = pos->volume_pos;
485                         if (cp_cnt) foffs += (__s64)blk_sz;
486
487                         TRACE(ft_t_data_flow, "new style QIC-113 header");
488                         PUT8(dst_buf, buf_pos, foffs);
489                         buf_pos += sizeof(__s64);
490                 } else {
491                         __u32 foffs = (__u32)pos->volume_pos;
492                         if (cp_cnt) foffs += (__u32)blk_sz;
493                         
494                         TRACE(ft_t_data_flow, "old style QIC-80MC header");
495                         PUT4(dst_buf, buf_pos, foffs);
496                         buf_pos += sizeof(__u32);
497                 }
498         } else if (size_left >= 0) {
499                 cp_cnt = cluster->cmpr_sz;
500                 cluster->cmpr_sz = 0;
501                 buf_pos = cp_cnt + sizeof(__u16);
502                 PUT2(dst_buf, 0, buf_pos);  
503                 /* zero unused part of segment. */
504                 memset(dst_buf + buf_pos, '\0', size_left);
505                 buf_pos = this_segs_sz;
506         } else { /* need entire segment and more space */
507                 PUT2(dst_buf, 0, 0); 
508                 cp_cnt = this_segs_sz - sizeof(__u16);
509                 cluster->cmpr_sz  -= cp_cnt;
510                 buf_pos = this_segs_sz;
511         }
512         memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
513         cluster->cmpr_pos += cp_cnt;
514         TRACE_EXIT buf_pos;
515 }
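/* Worked example (made-up numbers) for start_new_cseg() above: with
 * this_segs_sz = 29696 and cluster->cmpr_sz = 1000 bytes of remainder,
 * size_left = 29696 - 2 - 1000 = 28694 > 18, so the remainder is copied
 * right behind the 2 byte offset word, PUT2(dst_buf, 0, 1002) records
 * where the new cluster starts, and the file offset header is written
 * at byte 1002.
 */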
516
517 /* return-value: the number of bytes removed from the user-buffer
518  *               `src_buf' or error code
519  *
520  *  int *write_cnt           : how much actually has been moved to the
521  *                             dst_buf. Need not be initialized when
522  *                             function returns with an error code
523  *                             (negative return value)
524  *  __u8 *dst_buf            : kernel space buffer where the data has to
525  *                             be copied to. The contents of this buffer
526  *                             go to a specific segment.
527  *  const int seg_sz         : the size of the segment dst_buf will be
528  *                             copied to.
529  *  const zft_position *pos  : struct containing the coordinates in
530  *                             the current volume (byte position,
531  *                             segment id of current segment etc)
532  *  const zft_volinfo *volume: information about the current volume,
533  *                             size etc.
534  *  const __u8 *src_buf      : user space buffer that contains the
535  *                             data the user wants to be written to
536  *                             tape.
537  *  const int req_len        : the amount of data the user wants to be
538  *                             written to tape.
539  */
540 static int zftc_write(int *write_cnt,
541                       __u8 *dst_buf, const int seg_sz,
542                       const __u8 *src_buf, const int req_len,
543                       const zft_position *pos, const zft_volinfo *volume)
544 {
545         int req_len_left = req_len;
546         int result;
547         int len_left;
548         int buf_pos_write = pos->seg_byte_pos;
549         TRACE_FUN(ft_t_flow);
550         
551         /* Note: we do not unlock the module because
552          * there are some values cached in that `cseg' variable.  We
553          * don't want to lose this information by being
554          * unloaded by kerneld, even when the tape is full or when we
555          * cannot allocate enough memory.
556          */
557         if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
558                 TRACE_EXIT -ENOSPC;
559         }    
560         if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
561                 /* should we unlock the module? But it shouldn't 
562                  * be locked anyway ...
563                  */
564                 TRACE_EXIT -ENOMEM;
565         }
566         if (buf_pos_write == 0) { /* fill a new segment */
567                 *write_cnt = buf_pos_write = start_new_cseg(&cseg,
568                                                             dst_buf,
569                                                             pos,
570                                                             volume->blk_sz,
571                                                             zftc_buf, 
572                                                             seg_sz,
573                                                             volume->qic113);
574                 if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
575                         req_len_left -= result = volume->blk_sz;
576                         cseg.cmpr_pos  = 0;
577                 } else {
578                         result = 0;
579                 }
580         } else {
581                 *write_cnt = result = 0;
582         }
583         
584         len_left = seg_sz - buf_pos_write;
585         while ((req_len_left > 0) && (len_left > 18)) {
586                 /* now we have some size left for a new compressed
587          * block.  We know that the compression buffer is
588                  * empty (else there wouldn't be any space left).  
589                  */
590                 if (copy_from_user(zftc_scratch_buf, src_buf + result, 
591                                    volume->blk_sz) != 0) {
592                         TRACE_EXIT -EFAULT;
593                 }
594                 req_len_left -= volume->blk_sz;
595                 cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz, 
596                                             zftc_buf);
597                 if (cseg.cmpr_sz < 0) {
598                         cseg.uncmpr = 0x8000;
599                         cseg.cmpr_sz = -cseg.cmpr_sz;
600                 } else {
601                         cseg.uncmpr = 0;
602                 }
603                 /* increment "result" iff we copied the entire
604                  * compressed block to the zft_deblock_buf 
605                  */
606                 len_left -= sizeof(__u16);
607                 if (len_left >= cseg.cmpr_sz) {
608                         len_left -= cseg.count = cseg.cmpr_sz;
609                         cseg.cmpr_pos = cseg.cmpr_sz = 0;
610                         result += volume->blk_sz;
611                 } else {
612                         cseg.cmpr_sz       -= 
613                                 cseg.cmpr_pos =
614                                 cseg.count    = len_left;
615                         len_left = 0;
616                 }
617                 PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
618                 buf_pos_write += sizeof(__u16);
619                 memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
620                 buf_pos_write += cseg.count;
621                 *write_cnt    += cseg.count + sizeof(__u16);
622                 FT_SIGNAL_EXIT(_DONT_BLOCK);
623         }
624         /* erase the remainder of the segment if less than 18 bytes
625          * left (18 bytes is due to the QIC-80 standard) 
626          */
627         if (len_left <= 18) {
628                 memset(dst_buf + buf_pos_write, '\0', len_left);
629                 (*write_cnt) += len_left;
630         }
631         TRACE(ft_t_data_flow, "returning %d", result);
632         TRACE_EXIT result;
633 }   
634
635 /* out:
636  *
637  * int *read_cnt: the number of bytes we removed from the zft_deblock_buf
638  *                (result)
639  * int *to_do   : the remaining size of the read-request.
640  *
641  * in:
642  *
643  * char *buff          : buff is the address of the upper part of the user
644  *                       buffer that hasn't been filled with data yet.
645  *
646  * int buf_pos_read    : copied from _ftape_read()
647  * int buf_len_read    : copy of buf_len_rd from _ftape_read()
648  * char *zft_deblock_buf: zft_deblock_buf
649  * unsigned short blk_sz: the block size valid for this volume, may differ
650  *                            from zft_blk_sz.
651  * int finish: if != 0 means that this is the last segment belonging
652  *  to this volume
653  * returns the amount of data actually copied to the user-buffer
654  *
655  * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to
656  * be set to 0 
657  */
658 static int zftc_read (int *read_cnt, 
659                       __u8  *dst_buf, const int to_do, 
660                       const __u8 *src_buf, const int seg_sz, 
661                       const zft_position *pos, const zft_volinfo *volume)
662 {          
663         int uncompressed_sz;         
664         int result = 0;
665         int remaining = to_do;
666         TRACE_FUN(ft_t_flow);
667
668         TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
669         if (pos->seg_byte_pos == 0) {
670                 /* new segment just read
671                  */
672                 TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
673                             *read_cnt = 0);
674                 memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16), 
675                        cseg.count);
676                 cseg.cmpr_pos += cseg.count;
677                 *read_cnt      = cseg.offset;
678                 DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
679         } else {
680                 *read_cnt = 0;
681         }
682         /* loop and uncompress until user buffer full or
683          * deblock-buffer empty 
684          */
685         TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
686               cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
687         while ((cseg.spans == 0) && (remaining > 0)) {
688                 if (cseg.cmpr_pos  != 0) { /* cmpr buf is not empty */
689                         uncompressed_sz = 
690                                 zft_uncompress(zftc_buf,
691                                                cseg.uncmpr == 0x8000 ?
692                                                -cseg.cmpr_pos : cseg.cmpr_pos,
693                                                zftc_scratch_buf,
694                                                volume->blk_sz);
695                         if (uncompressed_sz != volume->blk_sz) {
696                                 *read_cnt = 0;
697                                 TRACE_ABORT(-EIO, ft_t_warn,
698                                       "Uncompressed blk (%d) != blk size (%d)",
699                                       uncompressed_sz, volume->blk_sz);
700                         }       
701                         if (copy_to_user(dst_buf + result, 
702                                          zftc_scratch_buf, 
703                                          uncompressed_sz) != 0 ) {
704                                 TRACE_EXIT -EFAULT;
705                         }
706                         remaining      -= uncompressed_sz;
707                         result     += uncompressed_sz;
708                         cseg.cmpr_pos  = 0;
709                 }                                              
710                 if (remaining > 0) {
711                         get_next_cluster(&cseg, src_buf, seg_sz, 
712                                          volume->end_seg == pos->seg_pos);
713                         if (cseg.count != 0) {
714                                 memcpy(zftc_buf, src_buf + cseg.offset,
715                                        cseg.count);
716                                 cseg.cmpr_pos = cseg.count;
717                                 cseg.offset  += cseg.count;
718                                 *read_cnt += cseg.count + sizeof(__u16);
719                         } else {
720                                 remaining = 0;
721                         }
722                 }
723                 TRACE(ft_t_data_flow, "\n" 
724                       KERN_INFO "compressed_sz: %d\n"
725                       KERN_INFO "compos       : %d\n"
726                       KERN_INFO "*read_cnt    : %d",
727                       cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
728         }
729         if (seg_sz - cseg.offset <= 18) {
730                 *read_cnt += seg_sz - cseg.offset;
731                 TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
732         }
733         TRACE(ft_t_data_flow, "\n"
734               KERN_INFO "segment size   : %d\n"
735               KERN_INFO "read count     : %d\n"
736               KERN_INFO "buf_pos_read   : %d\n"
737               KERN_INFO "remaining      : %d",
738                 seg_sz, *read_cnt, pos->seg_byte_pos, 
739                 seg_sz - *read_cnt - pos->seg_byte_pos);
740         TRACE(ft_t_data_flow, "returning: %d", result);
741         TRACE_EXIT result;
742 }                
743
744 /* Seeks to the new data position, sometimes reading a segment.
745  *  
746  * start_seg and end_seg give the boundaries of the current volume
747  * blk_sz is the blk_sz of the current volume as stored in the
748  * volume label
749  *
750  * We don't allow blocksizes less than 1024 bytes, therefore we don't need
751  * a 64 bit argument for new_block_pos.
752  */
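/* Example of the kilobyte bookkeeping used below (made-up numbers): with a
 * 28 KiB block size, seeking to new_block_pos = 100 gives
 * dest = 100 * (28672 >> 10) = 2800 KiB; distance is dest minus the current
 * volume position in KiB, and compute_seg_pos() turns that into an estimate
 * of how many segments to skip for the fast seek.
 */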
753
754 static int seek_in_segment(const unsigned int to_do, cmpr_info  *c_info,
755                            const char *src_buf, const int seg_sz, 
756                            const int seg_pos, const zft_volinfo *volume);
757 static int slow_seek_forward_until_error(const unsigned int distance,
758                                          cmpr_info *c_info, zft_position *pos, 
759                                          const zft_volinfo *volume, __u8 *buf);
760 static int search_valid_segment(unsigned int segment,
761                                 const unsigned int end_seg,
762                                 const unsigned int max_foffs,
763                                 zft_position *pos, cmpr_info *c_info,
764                                 const zft_volinfo *volume, __u8 *buf);
765 static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
766                              zft_position *pos, const zft_volinfo *volume,
767                              __u8 *buf);
768 static int compute_seg_pos(unsigned int dest, zft_position *pos,
769                            const zft_volinfo *volume);
770
771 #define ZFT_SLOW_SEEK_THRESHOLD  10 /* segments */
772 #define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
773 #define ZFT_FAST_SEEK_BACKUP     10 /* segments */
774
775 static int zftc_seek(unsigned int new_block_pos,
776                      zft_position *pos, const zft_volinfo *volume, __u8 *buf)
777 {
778         unsigned int dest;
779         int limit;
780         int distance;
781         int result = 0;
782         int seg_dist;
783         int new_seg;
784         int old_seg = 0;
785         int fast_seek_trials = 0;
786         TRACE_FUN(ft_t_flow);
787
788         if (new_block_pos == 0) {
789                 pos->seg_pos      = volume->start_seg;
790                 pos->seg_byte_pos = 0;
791                 pos->volume_pos   = 0;
792                 zftc_reset();
793                 TRACE_EXIT 0;
794         }
795         dest = new_block_pos * (volume->blk_sz >> 10);
796         distance = dest - (pos->volume_pos >> 10);
797         while (distance != 0) {
798                 seg_dist = compute_seg_pos(dest, pos, volume);
799                 TRACE(ft_t_noise, "\n"
800                       KERN_INFO "seg_dist: %d\n"
801                       KERN_INFO "distance: %d\n"
802                       KERN_INFO "dest    : %d\n"
803                       KERN_INFO "vpos    : %d\n"
804                       KERN_INFO "seg_pos : %d\n"
805                       KERN_INFO "trials  : %d",
806                       seg_dist, distance, dest,
807                       (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
808                       fast_seek_trials);
809                 if (distance > 0) {
810                         if (seg_dist < 0) {
811                                 TRACE(ft_t_bug, "BUG: distance %d > 0, "
812                                       "segment difference %d < 0",
813                                       distance, seg_dist);
814                                 result = -EIO;
815                                 break;
816                         }
817                         new_seg = pos->seg_pos + seg_dist;
818                         if (new_seg > volume->end_seg) {
819                                 new_seg = volume->end_seg;
820                         }
821                         if (old_seg == new_seg || /* loop */
822                             seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
823                             fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
824                                 TRACE(ft_t_noise, "starting slow seek:\n"
825                                    KERN_INFO "fast seek failed too often: %s\n"
826                                    KERN_INFO "near target position      : %s\n"
827                                    KERN_INFO "looping between two segs  : %s",
828                                       (fast_seek_trials >= 
829                                        ZFT_FAST_SEEK_MAX_TRIALS)
830                                       ? "yes" : "no",
831                                       (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD) 
832                                       ? "yes" : "no",
833                                       (old_seg == new_seg)
834                                       ? "yes" : "no");
835                                 result = slow_seek_forward(dest, &cseg, 
836                                                            pos, volume, buf);
837                                 break;
838                         }
839                         old_seg = new_seg;
840                         limit = volume->end_seg;
841                         fast_seek_trials ++;
842                         for (;;) {
843                                 result = search_valid_segment(new_seg, limit,
844                                                               volume->size,
845                                                               pos, &cseg,
846                                                               volume, buf);
847                                 if (result == 0 || result == -EINTR) {
848                                         break;
849                                 }
850                                 if (new_seg == volume->start_seg) {
851                                         result = -EIO; /* set error
852                                                         * condition
853                                                         */
854                                         break;
855                                 }
856                                 limit    = new_seg;
857                                 new_seg -= ZFT_FAST_SEEK_BACKUP;
858                                 if (new_seg < volume->start_seg) {
859                                         new_seg = volume->start_seg;
860                                 }
861                         }
862                         if (result < 0) {
863                                 TRACE(ft_t_warn,
864                                       "Couldn't find a readable segment");
865                                 break;
866                         }
867                 } else /* if (distance < 0) */ {
868                         if (seg_dist > 0) {
869                                 TRACE(ft_t_bug, "BUG: distance %d < 0, "
870                                       "segment difference %d >0",
871                                       distance, seg_dist);
872                                 result = -EIO;
873                                 break;
874                         }
875                         new_seg = pos->seg_pos + seg_dist;
876                         if (fast_seek_trials > 0 && seg_dist == 0) {
877                                 /* this avoids sticking to the same
878                                  * segment all the time. On the other hand:
879                                  * if we got here for the first time, and the
880                                  * deblock_buffer still contains a valid
881                                  * segment, then there is no need to skip to 
882                                  * the previous segment if the desired position
883                                  * is inside this segment.
884                                  */
885                                 new_seg --;
886                         }
887                         if (new_seg < volume->start_seg) {
888                                 new_seg = volume->start_seg;
889                         }
890                         limit   = pos->seg_pos;
891                         fast_seek_trials ++;
892                         for (;;) {
893                                 result = search_valid_segment(new_seg, limit,
894                                                               pos->volume_pos,
895                                                               pos, &cseg,
896                                                               volume, buf);
897                                 if (result == 0 || result == -EINTR) {
898                                         break;
899                                 }
900                                 if (new_seg == volume->start_seg) {
901                                         result = -EIO; /* set error
902                                                         * condition
903                                                         */
904                                         break;
905                                 }
906                                 limit    = new_seg;
907                                 new_seg -= ZFT_FAST_SEEK_BACKUP;
908                                 if (new_seg < volume->start_seg) {
909                                         new_seg = volume->start_seg;
910                                 }
911                         }
912                         if (result < 0) {
913                                 TRACE(ft_t_warn,
914                                       "Couldn't find a readable segment");
915                                 break;
916                         }
917                 }
918                 distance = dest - (pos->volume_pos >> 10);
919         }
920         TRACE_EXIT result;
921 }
922
923
924 /*  advance inside the given segment by at most to_do kilobytes;
925  *  returns the number of kilobytes moved
926  */
927
928 static int seek_in_segment(const unsigned int to_do,
929                            cmpr_info  *c_info,
930                            const char *src_buf, 
931                            const int seg_sz, 
932                            const int seg_pos,
933                            const zft_volinfo *volume)
934 {
935         int result = 0;
936         int blk_sz = volume->blk_sz >> 10;
937         int remaining = to_do;
938         TRACE_FUN(ft_t_flow);
939
940         if (c_info->offset == 0) {
941                 /* new segment just read
942                  */
943                 TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
944                 c_info->cmpr_pos += c_info->count;
945                 DUMP_CMPR_INFO(ft_t_noise, "", c_info);
946         }
947         /* loop and skip clusters until the requested distance is
948          * covered or the deblock buffer is exhausted
949          */
950         TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
951               c_info->cmpr_sz, c_info->cmpr_pos);
952         while (c_info->spans == 0 && remaining > 0) {
953                 if (c_info->cmpr_pos  != 0) { /* cmpr buf is not empty */
954                         result       += blk_sz;
955                         remaining    -= blk_sz;
956                         c_info->cmpr_pos = 0;
957                 }
958                 if (remaining > 0) {
959                         get_next_cluster(c_info, src_buf, seg_sz, 
960                                          volume->end_seg == seg_pos);
961                         if (c_info->count != 0) {
962                                 c_info->cmpr_pos = c_info->count;
963                                 c_info->offset  += c_info->count;
964                         } else {
965                                 break;
966                         }
967                 }
968                 /*  Allow escape from this loop on signal!
969                  */
970                 FT_SIGNAL_EXIT(_DONT_BLOCK);
971                 DUMP_CMPR_INFO(ft_t_noise, "", c_info);
972                 TRACE(ft_t_noise, "to_do: %d", remaining);
973         }
974         if (seg_sz - c_info->offset <= 18) {
975                 c_info->offset = seg_sz;
976         }
977         TRACE(ft_t_noise, "\n"
978               KERN_INFO "segment size   : %d\n"
979               KERN_INFO "buf_pos_read   : %d\n"
980               KERN_INFO "remaining      : %d",
981               seg_sz, c_info->offset,
982               seg_sz - c_info->offset);
983         TRACE_EXIT result;
984 }                
985
986 static int slow_seek_forward_until_error(const unsigned int distance,
987                                          cmpr_info *c_info,
988                                          zft_position *pos, 
989                                          const zft_volinfo *volume,
990                                          __u8 *buf)
991 {
992         unsigned int remaining = distance;
993         int seg_sz;
994         int seg_pos;
995         int result;
996         TRACE_FUN(ft_t_flow);
997         
998         seg_pos = pos->seg_pos;
999         do {
1000                 TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf, 
1001                                                        FT_RD_AHEAD),);
1002                 /* now we have the contents of the current segment in
1003                  * the deblock buffer
1004                  */
1005                 TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
1006                                                      seg_sz, seg_pos,volume),);
1007                 remaining        -= result;
1008                 pos->volume_pos  += result<<10;
1009                 pos->seg_pos      = seg_pos;
1010                 pos->seg_byte_pos = c_info->offset;
1011                 seg_pos ++;
1012                 if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
1013                         pos->seg_pos ++;
1014                         pos->seg_byte_pos = 0;
1015                         c_info->offset = 0;
1016                 }
1017                 /*  Allow escape from this loop on signal!
1018                  */
1019                 FT_SIGNAL_EXIT(_DONT_BLOCK);
1020                 TRACE(ft_t_noise, "\n"
1021                       KERN_INFO "remaining:  %d\n"
1022                       KERN_INFO "seg_pos:    %d\n"
1023                       KERN_INFO "end_seg:    %d\n"
1024                       KERN_INFO "result:     %d",
1025                       remaining, seg_pos, volume->end_seg, result);  
1026         } while (remaining > 0 && seg_pos <= volume->end_seg);
1027         TRACE_EXIT 0;
1028 }
1029
1030 /* return segment id of next segment containing valid data, -EIO otherwise
1031  */
1032 static int search_valid_segment(unsigned int segment,
1033                                 const unsigned int end_seg,
1034                                 const unsigned int max_foffs,
1035                                 zft_position *pos,
1036                                 cmpr_info *c_info,
1037                                 const zft_volinfo *volume,
1038                                 __u8 *buf)
1039 {
1040         cmpr_info tmp_info;
1041         int seg_sz;
1042         TRACE_FUN(ft_t_flow);
1043         
1044         memset(&tmp_info, 0, sizeof(cmpr_info));
1045         while (segment <= end_seg) {
1046                 FT_SIGNAL_EXIT(_DONT_BLOCK);
1047                 TRACE(ft_t_noise,
1048                       "Searching readable segment between %d and %d",
1049                       segment, end_seg);
1050                 seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
1051                 if ((seg_sz > 0) &&
1052                     (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
1053                     (tmp_info.foffs != 0 || segment == volume->start_seg)) {
1054                         if ((tmp_info.foffs>>10) > max_foffs) {
1055                                 TRACE_ABORT(-EIO, ft_t_noise, "\n"
1056                                             KERN_INFO "cseg.foff: %d\n"
1057                                             KERN_INFO "dest     : %d",
1058                                             (int)(tmp_info.foffs >> 10),
1059                                             max_foffs);
1060                         }
1061                         DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
1062                         *c_info           = tmp_info;
1063                         pos->seg_pos      = segment;
1064                         pos->volume_pos   = c_info->foffs;
1065                         pos->seg_byte_pos = c_info->offset;
1066                         TRACE(ft_t_noise, "found segment at %d", segment);
1067                         TRACE_EXIT 0;
1068                 }
1069                 segment++;
1070         }
1071         TRACE_EXIT -EIO;
1072 }
1073
1074 static int slow_seek_forward(unsigned int dest,
1075                              cmpr_info *c_info,
1076                              zft_position *pos,
1077                              const zft_volinfo *volume,
1078                              __u8 *buf)
1079 {
1080         unsigned int distance;
1081         int result = 0;
1082         TRACE_FUN(ft_t_flow);
1083                 
1084         distance = dest - (pos->volume_pos >> 10);
1085         while ((distance > 0) &&
1086                (result = slow_seek_forward_until_error(distance,
1087                                                        c_info,
1088                                                        pos,
1089                                                        volume,
1090                                                        buf)) < 0) {
1091                 if (result == -EINTR) {
1092                         break;
1093                 }
1094                 TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
1095                 /* the failing segment is either pos->seg_pos or
1096                  * pos->seg_pos + 1. There is no need to further try
1097                  * that segment, because ftape_read_segment() already
1098                  * has tried very much to read it. So we start with
1099                  * following segment, which is pos->seg_pos + 1
1100                  */
1101                 if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
1102                                         pos, c_info,
1103                                         volume, buf) < 0) {
1104                         TRACE(ft_t_noise, "search_valid_segment() failed");
1105                         result = -EIO;
1106                         break;
1107                 }
1108                 distance = dest - (pos->volume_pos >> 10);
1109                 result = 0;
1110                 TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
1111                 /* found valid segment, retry the seek */
1112         }
1113         TRACE_EXIT result;
1114 }
1115
1116 static int compute_seg_pos(const unsigned int dest,
1117                            zft_position *pos,
1118                            const zft_volinfo *volume)
1119 {
1120         int segment;
1121         int distance = dest - (pos->volume_pos >> 10);
1122         unsigned int raw_size;
1123         unsigned int virt_size;
1124         unsigned int factor;
1125         TRACE_FUN(ft_t_flow);
1126
1127         if (distance >= 0) {
1128                 raw_size  = volume->end_seg - pos->seg_pos + 1;
1129                 virt_size = ((unsigned int)(volume->size>>10) 
1130                              - (unsigned int)(pos->volume_pos>>10)
1131                              + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1132                 virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1133                 if (virt_size == 0 || raw_size == 0) {
1134                         TRACE_EXIT 0;
1135                 }
1136                 if (raw_size >= (1<<25)) {
1137                         factor = raw_size/(virt_size>>7);
1138                 } else {
1139                         factor = (raw_size<<7)/virt_size;
1140                 }
1141                 segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1142                 segment = (segment * factor)>>7;
1143         } else {
1144                 raw_size  = pos->seg_pos - volume->start_seg + 1;
1145                 virt_size = ((unsigned int)(pos->volume_pos>>10)
1146                              + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1147                 virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1148                 if (virt_size == 0 || raw_size == 0) {
1149                         TRACE_EXIT 0;
1150                 }
1151                 if (raw_size >= (1<<25)) {
1152                         factor = raw_size/(virt_size>>7);
1153                 } else {
1154                         factor = (raw_size<<7)/virt_size;
1155                 }
1156                 segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1157         }
1158         TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
1159         TRACE_EXIT segment;
1160 }
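/* Illustrative numbers (made up) for the 7 bit fixed point factor above:
 * raw_size = 100 physical segments holding virt_size = 200 segments worth
 * of uncompressed data (roughly 2:1 compression) gives
 * factor = (100 << 7) / 200 = 64, i.e. 0.5 in units of 1/128, so a forward
 * seek estimate derived from uncompressed kilobytes is halved before being
 * used as a physical segment distance.
 */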
1161
1162 static struct zft_cmpr_ops cmpr_ops = {
1163         zftc_write,
1164         zftc_read,
1165         zftc_seek,
1166         zftc_lock,
1167         zftc_reset,
1168         zftc_cleanup
1169 };
1170
1171 int zft_compressor_init(void)
1172 {
1173         TRACE_FUN(ft_t_flow);
1174         
1175 #ifdef MODULE
1176         printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
1177         if (TRACE_LEVEL >= ft_t_info) {
1178                 printk(
1179 KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
1180 KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n"
1181 KERN_INFO "Compiled for kernel version %s\n", UTS_RELEASE);
1182         }
1183 #else /* !MODULE */
1184         /* print a short no-nonsense boot message */
1185         printk("zftape compressor v1.00a 970514 for Linux " UTS_RELEASE "\n");
1186         printk("For use with " FTAPE_VERSION "\n");
1187 #endif /* MODULE */
1188         TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
1189         TRACE(ft_t_info, "installing compressor for zftape ...");
1190         TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
1191         TRACE_EXIT 0;
1192 }
1193
1194 #ifdef MODULE
1195
1196 MODULE_AUTHOR(
1197         "(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)");
1198 MODULE_DESCRIPTION(
1199 "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
1200 MODULE_LICENSE("GPL");
1201
1202 /* Called by modules package when installing the driver
1203  */
1204 int init_module(void)
1205 {
1206         return zft_compressor_init();
1207 }
1208
1209 #endif /* MODULE */