This commit was manufactured by cvs2svn to create branch 'vserver'.
[linux-2.6.git] / drivers / block / pktcdvd.c
1 /*
2  * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
3  * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
4  *
5  * May be copied or modified under the terms of the GNU General Public
6  * License.  See linux/COPYING for more information.
7  *
8  * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
9  * DVD-RW devices (aka an exercise in block layer masturbation)
10  *
11  *
12  * TODO: (circa order of when I will fix it)
13  * - Only able to write on CD-RW media right now.
14  * - check host application code on media and set it in write page
15  * - interface for UDF <-> packet to negotiate a new location when a write
16  *   fails.
17  * - handle OPC, especially for -RW media
18  *
19  * Theory of operation:
20  *
21  * We use a custom make_request_fn function that forwards reads directly to
22  * the underlying CD device. Write requests are either attached directly to
23  * a live packet_data object, or simply stored sequentially in a list for
24  * later processing by the kcdrwd kernel thread. This driver doesn't use
 * any elevator functionality as defined by the elevator_s struct, but the
26  * underlying CD device uses a standard elevator.
27  *
28  * This strategy makes it possible to do very late merging of IO requests.
29  * A new bio sent to pkt_make_request can be merged with a live packet_data
30  * object even if the object is in the data gathering state.
31  *
32  *************************************************************************/
33
34 #define VERSION_CODE    "v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
35
36 #include <linux/pktcdvd.h>
37 #include <linux/config.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/kernel.h>
41 #include <linux/kthread.h>
42 #include <linux/errno.h>
43 #include <linux/spinlock.h>
44 #include <linux/file.h>
45 #include <linux/proc_fs.h>
46 #include <linux/seq_file.h>
47 #include <linux/miscdevice.h>
48 #include <linux/suspend.h>
49 #include <scsi/scsi_cmnd.h>
50 #include <scsi/scsi_ioctl.h>
51
52 #include <asm/uaccess.h>
53
/* Debug printout, enabled when the driver is built with PACKET_DEBUG */
#if PACKET_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/* Verbose printout, enabled at PACKET_DEBUG level 2 and above */
#if PACKET_DEBUG > 1
#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define VPRINTK(fmt, args...)
#endif

/* Sentinel speed value meaning "as fast as the drive allows" */
#define MAX_SPEED 0xffff

/* Round a sector down to the start of its packet zone; the mask
 * arithmetic assumes pd->settings.size is a power of two. */
#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))

static struct pktcdvd_device *pkt_devs[MAX_WRITERS];	/* one slot per writer */
static struct proc_dir_entry *pkt_proc;			/* /proc directory root */
static int pkt_major;					/* dynamically assigned major */
static struct semaphore ctl_mutex;      /* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;				/* pool for per-bio private data */
75
76
/*
 * Account one completed bio against the device's in-flight counter.
 * When the last pending bio finishes, flag the io scheduler for
 * attention and wake the worker sleeping on pd->wqueue.
 */
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		VPRINTK("pktcdvd: queue empty\n");
		/* tell the worker that iosched state needs another look */
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
86
87 static void pkt_bio_destructor(struct bio *bio)
88 {
89         kfree(bio->bi_io_vec);
90         kfree(bio);
91 }
92
93 static struct bio *pkt_bio_alloc(int nr_iovecs)
94 {
95         struct bio_vec *bvl = NULL;
96         struct bio *bio;
97
98         bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
99         if (!bio)
100                 goto no_bio;
101         bio_init(bio);
102
103         bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL);
104         if (!bvl)
105                 goto no_bvl;
106         memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
107
108         bio->bi_max_vecs = nr_iovecs;
109         bio->bi_io_vec = bvl;
110         bio->bi_destructor = pkt_bio_destructor;
111
112         return bio;
113
114  no_bvl:
115         kfree(bio);
116  no_bio:
117         return NULL;
118 }
119
/*
 * Allocate a packet_data struct
 *
 * Allocates the struct itself, a write bio sized for a full packet
 * (PACKET_MAX_SIZE frames), the backing data pages, and one
 * single-segment read bio per frame.  Returns NULL on any failure,
 * with everything allocated so far released.
 */
static struct packet_data *pkt_alloc_packet_data(void)
{
	int i;
	struct packet_data *pkt;

	pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;
	/* zero-fill so the unwind paths below can NULL-test pages[]/r_bios[] */
	memset(pkt, 0, sizeof(struct packet_data));

	pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < PAGES_PER_PACKET; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL);
		if (!pkt->pages[i])
			goto no_page;
	}
	for (i = 0; i < PAGES_PER_PACKET; i++)
		clear_page(page_address(pkt->pages[i]));

	spin_lock_init(&pkt->lock);

	for (i = 0; i < PACKET_MAX_SIZE; i++) {
		struct bio *bio = pkt_bio_alloc(1);
		if (!bio)
			goto no_rd_bio;
		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	/* free only the read bios that were successfully allocated */
	for (i = 0; i < PACKET_MAX_SIZE; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	/* pages[] slots that never got a page are still NULL here */
	for (i = 0; i < PAGES_PER_PACKET; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}
173
174 /*
175  * Free a packet_data struct
176  */
177 static void pkt_free_packet_data(struct packet_data *pkt)
178 {
179         int i;
180
181         for (i = 0; i < PACKET_MAX_SIZE; i++) {
182                 struct bio *bio = pkt->r_bios[i];
183                 if (bio)
184                         bio_put(bio);
185         }
186         for (i = 0; i < PAGES_PER_PACKET; i++)
187                 __free_page(pkt->pages[i]);
188         bio_put(pkt->w_bio);
189         kfree(pkt);
190 }
191
/*
 * Free every packet_data on the device's free list.  Must only be
 * called when no packets are in flight (active list empty).
 */
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	/* _safe variant: each pkt is freed inside the loop body */
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
}
202
203 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
204 {
205         struct packet_data *pkt;
206
207         INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
208         INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
209         spin_lock_init(&pd->cdrw.active_list_lock);
210         while (nr_packets > 0) {
211                 pkt = pkt_alloc_packet_data();
212                 if (!pkt) {
213                         pkt_shrink_pktlist(pd);
214                         return 0;
215                 }
216                 pkt->id = nr_packets;
217                 pkt->pd = pd;
218                 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
219                 nr_packets--;
220         }
221         return 1;
222 }
223
224 static void *pkt_rb_alloc(int gfp_mask, void *data)
225 {
226         return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
227 }
228
/* mempool release callback for pkt_rb_node objects */
static void pkt_rb_free(void *ptr, void *data)
{
	struct pkt_rb_node *node = ptr;

	kfree(node);
}
233
234 static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
235 {
236         struct rb_node *n = rb_next(&node->rb_node);
237         if (!n)
238                 return NULL;
239         return rb_entry(n, struct pkt_rb_node, rb_node);
240 }
241
/*
 * Unlink a node from the pd->bio_queue rb tree, return it to the
 * rb-node mempool and update the queue size accounting.
 * NOTE(review): callers presumably hold pd->lock -- verify at call sites.
 */
static inline void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
249
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 * Returns NULL if the tree is empty or every queued bio starts below s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	/* descend to the leaf nearest s */
	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	/* the leaf may still lie below s; its in-order successor is then
	 * the first node with bi_sector >= s */
	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}
283
284 /*
285  * Insert a node into the pd->bio_queue rb tree.
286  */
287 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
288 {
289         struct rb_node **p = &pd->bio_queue.rb_node;
290         struct rb_node *parent = NULL;
291         sector_t s = node->bio->bi_sector;
292         struct pkt_rb_node *tmp;
293
294         while (*p) {
295                 parent = *p;
296                 tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
297                 if (s < tmp->bio->bi_sector)
298                         p = &(*p)->rb_left;
299                 else
300                         p = &(*p)->rb_right;
301         }
302         rb_link_node(&node->rb_node, parent, p);
303         rb_insert_color(&node->rb_node, &pd->bio_queue);
304         pd->bio_queue_size++;
305 }
306
307 /*
308  * Add a bio to a single linked list defined by its head and tail pointers.
309  */
310 static inline void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
311 {
312         bio->bi_next = NULL;
313         if (*list_tail) {
314                 BUG_ON((*list_head) == NULL);
315                 (*list_tail)->bi_next = bio;
316                 (*list_tail) = bio;
317         } else {
318                 BUG_ON((*list_head) != NULL);
319                 (*list_head) = bio;
320                 (*list_tail) = bio;
321         }
322 }
323
324 /*
325  * Remove and return the first bio from a single linked list defined by its
326  * head and tail pointers.
327  */
328 static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail)
329 {
330         struct bio *bio;
331
332         if (*list_head == NULL)
333                 return NULL;
334
335         bio = *list_head;
336         *list_head = bio->bi_next;
337         if (*list_head == NULL)
338                 *list_tail = NULL;
339
340         bio->bi_next = NULL;
341         return bio;
342 }
343
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 *
 * Builds a REQ_BLOCK_PC request from cgc, inserts it at the back of
 * the elevator and sleeps until the block layer completes it.
 * Returns 0 on success, -EIO if the request finished with errors.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	char sense[SCSI_SENSE_BUFFERSIZE];
	request_queue_t *q;
	struct request *rq;
	DECLARE_COMPLETION(wait);
	int err = 0;

	q = bdev_get_queue(pd->bdev);

	/* __GFP_WAIT: may sleep until a request becomes available */
	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
			     __GFP_WAIT);
	rq->errors = 0;
	rq->rq_disk = pd->bdev->bd_disk;
	rq->bio = NULL;
	rq->buffer = NULL;
	rq->timeout = 60*HZ;
	rq->data = cgc->buffer;
	rq->data_len = cgc->buflen;
	rq->sense = sense;	/* sense data lands in our stack buffer */
	memset(sense, 0, sizeof(sense));
	rq->sense_len = 0;
	rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
	if (cgc->quiet)
		rq->flags |= REQ_QUIET;
	/* copy the CDB; pad the remainder of rq->cmd with zeroes */
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);

	/* NOTE(review): the extra reference presumably keeps the request
	 * alive past completion so rq->errors can be read below -- verify
	 * against the block layer's request put semantics. */
	rq->ref_count++;
	rq->flags |= REQ_NOMERGE;
	rq->waiting = &wait;	/* completion is signalled when the request ends */
	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
	generic_unplug_device(q);
	wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	blk_put_request(rq);
	return err;
}
390
391 /*
392  * A generic sense dump / resolve mechanism should be implemented across
393  * all ATAPI + SCSI devices.
394  */
395 static void pkt_dump_sense(struct packet_command *cgc)
396 {
397         static char *info[9] = { "No sense", "Recovered error", "Not ready",
398                                  "Medium error", "Hardware error", "Illegal request",
399                                  "Unit attention", "Data protect", "Blank check" };
400         int i;
401         struct request_sense *sense = cgc->sense;
402
403         printk("pktcdvd:");
404         for (i = 0; i < CDROM_PACKET_SIZE; i++)
405                 printk(" %02x", cgc->cmd[i]);
406         printk(" - ");
407
408         if (sense == NULL) {
409                 printk("no sense\n");
410                 return;
411         }
412
413         printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
414
415         if (sense->sense_key > 8) {
416                 printk(" (INVALID)\n");
417                 return;
418         }
419
420         printk(" (%s)\n", info[sense->sense_key]);
421 }
422
/*
 * flush the drive cache to media
 *
 * Issues GPCMD_FLUSH_CACHE synchronously and returns the status from
 * pkt_generic_packet().  Errors are not dumped (quiet = 1).
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
443
444 /*
445  * speed is given as the normal factor, e.g. 4 for 4x
446  */
447 static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed)
448 {
449         struct packet_command cgc;
450         struct request_sense sense;
451         int ret;
452
453         init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
454         cgc.sense = &sense;
455         cgc.cmd[0] = GPCMD_SET_SPEED;
456         cgc.cmd[2] = (read_speed >> 8) & 0xff;
457         cgc.cmd[3] = read_speed & 0xff;
458         cgc.cmd[4] = (write_speed >> 8) & 0xff;
459         cgc.cmd[5] = write_speed & 0xff;
460
461         if ((ret = pkt_generic_packet(pd, &cgc)))
462                 pkt_dump_sense(&cgc);
463
464         return ret;
465 }
466
467 /*
468  * Queue a bio for processing by the low-level CD device. Must be called
469  * from process context.
470  */
471 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
472 {
473         spin_lock(&pd->iosched.lock);
474         if (bio_data_dir(bio) == READ) {
475                 pkt_add_list_last(bio, &pd->iosched.read_queue,
476                                   &pd->iosched.read_queue_tail);
477                 if (high_prio_read)
478                         pd->iosched.high_prio_read = 1;
479         } else {
480                 pkt_add_list_last(bio, &pd->iosched.write_queue,
481                                   &pd->iosched.write_queue_tail);
482         }
483         spin_unlock(&pd->iosched.lock);
484
485         atomic_set(&pd->iosched.attention, 1);
486         wake_up(&pd->wqueue);
487 }
488
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 * - Reads originating from user space should have higher priority than reads
 *   originating from pkt_gather_data, because some process is usually waiting
 *   on reads of the first kind.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	request_queue_t *q;

	/* only run when someone has raised the attention flag */
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	q = bdev_get_queue(pd->bdev);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued, high_prio_read;

		/* snapshot queue state under the iosched lock */
		spin_lock(&pd->iosched.lock);
		reads_queued = (pd->iosched.read_queue != NULL);
		writes_queued = (pd->iosched.write_queue != NULL);
		if (!reads_queued)
			pd->iosched.high_prio_read = 0;
		high_prio_read = pd->iosched.high_prio_read;
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			/* switch to reading: wait for in-flight bios to
			 * drain, then flush the drive cache first */
			if (high_prio_read || (!writes_queued && reads_queued)) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK("pktcdvd: write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			/* switch to writing only once reads are drained */
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK("pktcdvd: read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		/* pop the next bio for the current direction */
		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing) {
			bio = pkt_get_list_first(&pd->iosched.write_queue,
						 &pd->iosched.write_queue_tail);
		} else {
			bio = pkt_get_list_first(&pd->iosched.read_queue,
						 &pd->iosched.read_queue_tail);
		}
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		/* track sustained reading (in KB) to pick the read speed */
		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else
			pd->iosched.successive_reads = 0;
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			/* long read streak: crank read speed to maximum */
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			/* mixed workload: keep read and write speeds equal */
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
581
582 /*
583  * Special care is needed if the underlying block device has a small
584  * max_phys_segments value.
585  */
586 static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
587 {
588         if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
589                 /*
590                  * The cdrom device can handle one segment/frame
591                  */
592                 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
593                 return 0;
594         } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
595                 /*
596                  * We can handle this case at the expense of some extra memory
597                  * copies during write operations
598                  */
599                 set_bit(PACKET_MERGE_SEGS, &pd->flags);
600                 return 0;
601         } else {
602                 printk("pktcdvd: cdrom max_phys_segments too small\n");
603                 return -EIO;
604         }
605 }
606
/*
 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 *
 * Starts at segment 'seg', offset 'offs' within src_bio's io_vec and
 * copies into dst_page at dst_offs, crossing source segment boundaries
 * as needed until one full frame has been copied.
 */
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs,
			      struct page *dst_page, int dst_offs)
{
	unsigned int copy_size = CD_FRAMESIZE;

	while (copy_size > 0) {
		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
			src_bvl->bv_offset + offs;
		void *vto = page_address(dst_page) + dst_offs;
		/* copy at most up to the end of the current source segment */
		int len = min_t(int, copy_size, src_bvl->bv_len - offs);

		BUG_ON(len < 0);
		memcpy(vto, vfrom, len);
		kunmap_atomic(vfrom, KM_USER0);	/* unmap before the next kmap */

		/* advance to the start of the next source segment */
		seg++;
		offs = 0;
		dst_offs += len;
		copy_size -= len;
	}
}
632
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 *
 * On return, pages[]/offsets[] have been redirected to point into
 * pkt->pages[], frame by frame.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		/* frames already backed by pkt->pages[] need no copy */
		if (pages[f] != pkt->pages[p]) {
			void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f];
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom, KM_USER0);
			/* redirect the caller's view to the local copy */
			pages[f] = pkt->pages[p];
			offsets[f] = offs;
		} else {
			BUG_ON(offsets[f] != offs);
		}
		/* step to the next page once this one is full of frames */
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			BUG_ON(offs > PAGE_SIZE);
			offs = 0;
			p++;
		}
	}
}
666
/*
 * bi_end_io handler for the per-frame read bios issued by
 * pkt_gather_data().  When the last outstanding read of the packet
 * completes, kick the packet state machine and wake the worker.
 */
static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	/* non-zero bi_size: partial completion, more to come */
	if (bio->bi_size)
		return 1;

	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	/* last read for this packet done -> run the state machine */
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);

	return 0;
}
689
/*
 * bi_end_io handler for the packet's write bio.  Updates statistics
 * and unconditionally kicks the packet state machine.
 */
static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	/* non-zero bi_size: partial completion, more to come */
	if (bio->bi_size)
		return 1;

	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
	return 0;
}
709
/*
 * Schedule reads for the holes in a packet
 *
 * Works out which frames of the zone are not covered by the attached
 * write bios and queues one single-frame read for each missing frame,
 * unless the packet's pages still hold valid data from a previous
 * write to the same zone.  Also updates the packet statistics.
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];	/* 1 = frame supplied by a write bio */

	BUG_ON(!pkt->orig_bios);

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/* data from an earlier write to this zone is still in pkt->pages[] */
	if (pkt->cache_valid) {
		VPRINTK("pkt_gather_data: zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;
		if (written[f])
			continue;
		bio = pkt->r_bios[f];	/* preallocated single-vec read bio */
		bio_init(bio);
		bio->bi_max_vecs = 1;
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		/* map frame f to its page and offset within pkt->pages[] */
		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio, 0);
		frames_read++;
	}

out_account:
	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
	pd->stats.secs_w += pd->settings.size;
}
781
782 /*
783  * Find a packet matching zone, or the least recently used packet if
784  * there is no match.
785  */
786 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
787 {
788         struct packet_data *pkt;
789
790         list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
791                 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
792                         list_del_init(&pkt->list);
793                         if (pkt->sector != zone)
794                                 pkt->cache_valid = 0;
795                         break;
796                 }
797         }
798         return pkt;
799 }
800
801 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
802 {
803         if (pkt->cache_valid) {
804                 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
805         } else {
806                 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
807         }
808 }
809
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 * The real implementation (relocating the zone via the file system's
 * relocate_blocks hook) is disabled below pending fs support.
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op || !sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_next = NULL;
	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
	pkt->bio->bi_idx = 0;

	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
	BUG_ON(pkt->bio->bi_private != pkt);

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}
869
/*
 * Transition a packet to a new state; logs the old -> new transition
 * when verbose debugging is compiled in.
 */
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	/* names must stay in sync with enum packet_data_state */
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
882
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;

	VPRINTK("handle_queue\n");

	/* Clear the rescan request before scanning, so a request that
	 * arrives while we work triggers another pass later. */
	atomic_set(&pd->scan_queue, 0);

	/* Nothing can be started without a free packet_data object. */
	if (list_empty(&pd->cdrw.pkt_free_list)) {
		VPRINTK("handle_queue: no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	/* Start at current_sector so zones are served in roughly
	 * ascending order; wrap to the first queued bio if nothing is
	 * queued at or after it. */
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = ZONE(bio->bi_sector, pd);
		/* Skip zones that already have an active packet. */
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone)
				goto try_next_bio;
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		/* Wrapped all the way around: every queued zone is busy. */
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		VPRINTK("handle_queue: no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);
	/* NOTE(review): relies on the free-list being non-empty, checked
	 * above - confirm nothing else drains it in between. */
	BUG_ON(!pkt);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	/* settings.size is kept in 512-byte sectors; 4 sectors per
	 * 2048-byte frame. */
	pkt->frames = pd->settings.size >> 2;
	BUG_ON(pkt->frames > PACKET_MAX_SIZE);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		VPRINTK("pkt_handle_queue: found zone=%llx\n",
			(unsigned long long)ZONE(bio->bi_sector, pd));
		if (ZONE(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	spin_unlock(&pd->lock);

	/* Give the packet a grace period to collect more bios before
	 * kcdrwd forces it through the state machine. */
	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
978
979 /*
980  * Assemble a bio to write one packet and queue the bio for processing
981  * by the underlying block device.
982  */
983 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
984 {
985         struct bio *bio;
986         struct page *pages[PACKET_MAX_SIZE];
987         int offsets[PACKET_MAX_SIZE];
988         int f;
989         int frames_write;
990
991         for (f = 0; f < pkt->frames; f++) {
992                 pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
993                 offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE;
994         }
995
996         /*
997          * Fill-in pages[] and offsets[] with data from orig_bios.
998          */
999         frames_write = 0;
1000         spin_lock(&pkt->lock);
1001         for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
1002                 int segment = bio->bi_idx;
1003                 int src_offs = 0;
1004                 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
1005                 int num_frames = bio->bi_size / CD_FRAMESIZE;
1006                 BUG_ON(first_frame < 0);
1007                 BUG_ON(first_frame + num_frames > pkt->frames);
1008                 for (f = first_frame; f < first_frame + num_frames; f++) {
1009                         struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
1010
1011                         while (src_offs >= src_bvl->bv_len) {
1012                                 src_offs -= src_bvl->bv_len;
1013                                 segment++;
1014                                 BUG_ON(segment >= bio->bi_vcnt);
1015                                 src_bvl = bio_iovec_idx(bio, segment);
1016                         }
1017
1018                         if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
1019                                 pages[f] = src_bvl->bv_page;
1020                                 offsets[f] = src_bvl->bv_offset + src_offs;
1021                         } else {
1022                                 pkt_copy_bio_data(bio, segment, src_offs,
1023                                                   pages[f], offsets[f]);
1024                         }
1025                         src_offs += CD_FRAMESIZE;
1026                         frames_write++;
1027                 }
1028         }
1029         pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
1030         spin_unlock(&pkt->lock);
1031
1032         VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
1033                 frames_write, (unsigned long long)pkt->sector);
1034         BUG_ON(frames_write != pkt->write_size);
1035
1036         if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
1037                 pkt_make_local_copy(pkt, pages, offsets);
1038                 pkt->cache_valid = 1;
1039         } else {
1040                 pkt->cache_valid = 0;
1041         }
1042
1043         /* Start the write request */
1044         bio_init(pkt->w_bio);
1045         pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
1046         pkt->w_bio->bi_sector = pkt->sector;
1047         pkt->w_bio->bi_bdev = pd->bdev;
1048         pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1049         pkt->w_bio->bi_private = pkt;
1050         for (f = 0; f < pkt->frames; f++) {
1051                 if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) &&
1052                     (offsets[f + 1] = offsets[f] + CD_FRAMESIZE)) {
1053                         if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f]))
1054                                 BUG();
1055                         f++;
1056                 } else {
1057                         if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f]))
1058                                 BUG();
1059                 }
1060         }
1061         VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt);
1062
1063         atomic_set(&pkt->io_wait, 1);
1064         pkt->w_bio->bi_rw = WRITE;
1065         pkt_queue_bio(pd, pkt->w_bio, 0);
1066 }
1067
1068 static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
1069 {
1070         struct bio *bio, *next;
1071
1072         if (!uptodate)
1073                 pkt->cache_valid = 0;
1074
1075         /* Finish all bios corresponding to this packet */
1076         bio = pkt->orig_bios;
1077         while (bio) {
1078                 next = bio->bi_next;
1079                 bio->bi_next = NULL;
1080                 bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
1081                 bio = next;
1082         }
1083         pkt->orig_bios = pkt->orig_bios_tail = NULL;
1084 }
1085
/*
 * Advance the state machine for one packet until it either finishes
 * or has to wait (for more data or for outstanding I/O).
 */
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	VPRINTK("run_state_machine: pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			/* Keep waiting for more bios while the packet is
			 * not full and its grace timer (decremented by
			 * kcdrwd) has not expired. */
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			/* Read the frames we did not get from writers. */
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			/* Gather reads still in flight. */
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			/* Packet write bio has not completed yet. */
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			/* Try to relocate the packet and re-issue the
			 * write; otherwise give up on it. */
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				VPRINTK("No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			/* End the original bios with the final status of
			 * the packet write and leave the state machine. */
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
1145
/*
 * Run the state machine for every packet that requested it, then
 * recycle finished packets back onto the free list.
 */
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	VPRINTK("pkt_handle_packets\n");

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			/* NOTE(review): the packet is marked idle only
			 * after it was handed back via pkt_put_packet_data()
			 * - confirm nothing can grab it in between. */
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			/* A packet was freed, so a queued zone may now be
			 * startable: ask kcdrwd to rescan the queue. */
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
1176
1177 static void pkt_count_states(struct pktcdvd_device *pd, int *states)
1178 {
1179         struct packet_data *pkt;
1180         int i;
1181
1182         for (i = 0; i <= PACKET_NUM_STATES; i++)
1183                 states[i] = 0;
1184
1185         spin_lock(&pd->cdrw.active_list_lock);
1186         list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1187                 states[pkt->state]++;
1188         }
1189         spin_unlock(&pd->cdrw.active_list_lock);
1190 }
1191
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2], states[3],
					states[4], states[5]);
			}

			/* Sleep no longer than the shortest per-packet
			 * timeout so waiting packets get serviced on time. */
			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			/* Kick queued I/O on the underlying device before
			 * going to sleep. */
			generic_unplug_device(bdev_get_queue(pd->bdev));

			VPRINTK("kcdrwd: sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			VPRINTK("kcdrwd: wake up\n");

			/* make swsusp happy with our thread */
			if (current->flags & PF_FREEZE)
				refrigerator(PF_FREEZE);

			/* Charge the time actually slept (min_sleep_time -
			 * residue) to every waiting packet; packets whose
			 * timer expired get their state machine run. */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (signal_pending(current)) {
				flush_signals(current);
			}
			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
1296
1297 static void pkt_print_settings(struct pktcdvd_device *pd)
1298 {
1299         printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1300         printk("%u blocks, ", pd->settings.size >> 2);
1301         printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1302 }
1303
1304 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc,
1305                           int page_code, int page_control)
1306 {
1307         memset(cgc->cmd, 0, sizeof(cgc->cmd));
1308
1309         cgc->cmd[0] = GPCMD_MODE_SENSE_10;
1310         cgc->cmd[2] = page_code | (page_control << 6);
1311         cgc->cmd[7] = cgc->buflen >> 8;
1312         cgc->cmd[8] = cgc->buflen & 0xff;
1313         cgc->data_direction = CGC_DATA_READ;
1314         return pkt_generic_packet(pd, cgc);
1315 }
1316
1317 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1318 {
1319         memset(cgc->cmd, 0, sizeof(cgc->cmd));
1320         memset(cgc->buffer, 0, 2);
1321         cgc->cmd[0] = GPCMD_MODE_SELECT_10;
1322         cgc->cmd[1] = 0x10;             /* PF */
1323         cgc->cmd[7] = cgc->buflen >> 8;
1324         cgc->cmd[8] = cgc->buflen & 0xff;
1325         cgc->data_direction = CGC_DATA_WRITE;
1326         return pkt_generic_packet(pd, cgc);
1327 }
1328
1329 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1330 {
1331         struct packet_command cgc;
1332         int ret;
1333
1334         /* set up command and get the disc info */
1335         init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
1336         cgc.cmd[0] = GPCMD_READ_DISC_INFO;
1337         cgc.cmd[8] = cgc.buflen = 2;
1338         cgc.quiet = 1;
1339
1340         if ((ret = pkt_generic_packet(pd, &cgc)))
1341                 return ret;
1342
1343         /* not all drives have the same disc_info length, so requeue
1344          * packet with the length the drive tells us it can supply
1345          */
1346         cgc.buflen = be16_to_cpu(di->disc_information_length) +
1347                      sizeof(di->disc_information_length);
1348
1349         if (cgc.buflen > sizeof(disc_information))
1350                 cgc.buflen = sizeof(disc_information);
1351
1352         cgc.cmd[8] = cgc.buflen;
1353         return pkt_generic_packet(pd, &cgc);
1354 }
1355
1356 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1357 {
1358         struct packet_command cgc;
1359         int ret;
1360
1361         init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
1362         cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
1363         cgc.cmd[1] = type & 3;
1364         cgc.cmd[4] = (track & 0xff00) >> 8;
1365         cgc.cmd[5] = track & 0xff;
1366         cgc.cmd[8] = 8;
1367         cgc.quiet = 1;
1368
1369         if ((ret = pkt_generic_packet(pd, &cgc)))
1370                 return ret;
1371
1372         cgc.buflen = be16_to_cpu(ti->track_information_length) +
1373                      sizeof(ti->track_information_length);
1374
1375         if (cgc.buflen > sizeof(track_information))
1376                 cgc.buflen = sizeof(track_information);
1377
1378         cgc.cmd[8] = cgc.buflen;
1379         return pkt_generic_packet(pd, &cgc);
1380 }
1381
1382 static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
1383 {
1384         disc_information di;
1385         track_information ti;
1386         __u32 last_track;
1387         int ret = -1;
1388
1389         if ((ret = pkt_get_disc_info(pd, &di)))
1390                 return ret;
1391
1392         last_track = (di.last_track_msb << 8) | di.last_track_lsb;
1393         if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1394                 return ret;
1395
1396         /* if this track is blank, try the previous. */
1397         if (ti.blank) {
1398                 last_track--;
1399                 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1400                         return ret;
1401         }
1402
1403         /* if last recorded field is valid, return it. */
1404         if (ti.lra_v) {
1405                 *last_written = be32_to_cpu(ti.last_rec_address);
1406         } else {
1407                 /* make it up instead */
1408                 *last_written = be32_to_cpu(ti.track_start) +
1409                                 be32_to_cpu(ti.track_size);
1410                 if (ti.free_blocks)
1411                         *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
1412         }
1413         return 0;
1414 }
1415
/*
 * write mode select package based on pd->settings
 */
static int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW */
	if (pd->mmc3_profile == 0x1a)
		return 0;

	/* First sense only the header of the write parameters page to
	 * learn the total mode data length and the block descriptor
	 * length (stored as mode_offset). */
	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/* bytes 0-1: mode data length (big-endian, excludes itself);
	 * bytes 6-7: block descriptor length - per MODE SENSE(10) header */
	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	/* Fill in the negotiated settings. */
	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	/* packet size is in 2048-byte frames; settings.size is kept in
	 * 512-byte sectors, hence the >> 2 */
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
1499
1500 /*
1501  * 0 -- we can write to this track, 1 -- we can't
1502  */
1503 static int pkt_good_track(track_information *ti)
1504 {
1505         /*
1506          * only good for CD-RW at the moment, not DVD-RW
1507          */
1508
1509         /*
1510          * FIXME: only for FP
1511          */
1512         if (ti->fp == 0)
1513                 return 0;
1514
1515         /*
1516          * "good" settings as per Mt Fuji.
1517          */
1518         if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
1519                 return 0;
1520
1521         if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
1522                 return 0;
1523
1524         if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
1525                 return 0;
1526
1527         printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1528         return 1;
1529 }
1530
1531 /*
1532  * 0 -- we can write to this disc, 1 -- we can't
1533  */
1534 static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1535 {
1536         switch (pd->mmc3_profile) {
1537                 case 0x0a: /* CD-RW */
1538                 case 0xffff: /* MMC3 not supported */
1539                         break;
1540                 case 0x1a: /* DVD+RW */
1541                 case 0x13: /* DVD-RW */
1542                         return 0;
1543                 default:
1544                         printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
1545                         return 1;
1546         }
1547
1548         /*
1549          * for disc type 0xff we should probably reserve a new track.
1550          * but i'm not sure, should we leave this to user apps? probably.
1551          */
1552         if (di->disc_type == 0xff) {
1553                 printk("pktcdvd: Unknown disc. No track?\n");
1554                 return 1;
1555         }
1556
1557         if (di->disc_type != 0x20 && di->disc_type != 0) {
1558                 printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
1559                 return 1;
1560         }
1561
1562         if (di->erasable == 0) {
1563                 printk("pktcdvd: Disc not erasable\n");
1564                 return 1;
1565         }
1566
1567         if (di->border_status == PACKET_SESSION_RESERVED) {
1568                 printk("pktcdvd: Can't write to last track (reserved)\n");
1569                 return 1;
1570         }
1571
1572         return 0;
1573 }
1574
/*
 * Probe the drive and the inserted medium, verify that packet writing
 * is possible, and initialize pd->settings (packet size, block mode,
 * track mode) and pd->nwa/pd->lra from the track info.
 */
static int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	/* GET CONFIGURATION: current profile lives in bytes 6-7 of the
	 * feature header; failure means the drive predates MMC3. */
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		printk("failed get_disc\n");
		return ret;
	}

	if (pkt_good_disc(pd, &di))
		return -ENXIO;

	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
			printk("pktcdvd: inserted media is DVD+RW\n");
			break;
		case 0x13: /* DVD-RW */
			printk("pktcdvd: inserted media is DVD-RW\n");
			break;
		default:
			printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
			break;
	}
	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		printk("pktcdvd: failed get_track\n");
		return ret;
	}

	if (pkt_good_track(&ti)) {
		printk("pktcdvd: can't write to this track\n");
		return -ENXIO;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		printk("pktcdvd: detected zero packet size!\n");
		pd->settings.size = 128;
	}
	pd->settings.fp = ti.fp;
	/* Offset of the track start within a packet-sized zone.
	 * NOTE(review): the mask assumes settings.size is a power of
	 * two - confirm, since fixed_packet_size comes from the medium. */
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			printk("pktcdvd: unknown data mode\n");
			return 1;
	}
	return 0;
}
1677
1678 /*
1679  * enable/disable write caching on drive
1680  */
1681 static int pkt_write_caching(struct pktcdvd_device *pd, int set)
1682 {
1683         struct packet_command cgc;
1684         struct request_sense sense;
1685         unsigned char buf[64];
1686         int ret;
1687
1688         memset(buf, 0, sizeof(buf));
1689         init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1690         cgc.sense = &sense;
1691         cgc.buflen = pd->mode_offset + 12;
1692
1693         /*
1694          * caching mode page might not be there, so quiet this command
1695          */
1696         cgc.quiet = 1;
1697
1698         if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
1699                 return ret;
1700
1701         buf[pd->mode_offset + 10] |= (!!set << 2);
1702
1703         cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1704         ret = pkt_mode_select(pd, &cgc);
1705         if (ret) {
1706                 printk("pktcdvd: write caching control failed\n");
1707                 pkt_dump_sense(&cgc);
1708         } else if (!ret && set)
1709                 printk("pktcdvd: enabled write caching on %s\n", pd->name);
1710         return ret;
1711 }
1712
1713 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1714 {
1715         struct packet_command cgc;
1716
1717         init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1718         cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1719         cgc.cmd[4] = lockflag ? 1 : 0;
1720         return pkt_generic_packet(pd, &cgc);
1721 }
1722
/*
 * Returns drive maximum write speed
 */
static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	memset(buf, 0, sizeof(buf));
	/* cap_buf points at the capabilities mode page payload, past the
	 * mode parameter header and block descriptors. */
	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		/* Retry with a buffer length derived from the page-length
		 * byte (cap_buf[1]); some drives reject the full-sized
		 * request. NOTE(review): after a failed first sense,
		 * cap_buf[1] may still be the zero we memset above -
		 * confirm this fallback is intended. */
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;                        /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;                /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	/* Speed is a big-endian 16-bit value at the chosen offset. */
	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
1766
/* These tables from cdrecord - I don't have orange book */
/* Each table maps the 4-bit ATIP A1 speed code to a speed multiplier
 * (in CD "x" units); 0 marks codes that are invalid for that media class. */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
1783
1784 /*
1785  * reads the maximum media speed from ATIP
1786  */
1787 static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
1788 {
1789         struct packet_command cgc;
1790         struct request_sense sense;
1791         unsigned char buf[64];
1792         unsigned int size, st, sp;
1793         int ret;
1794
1795         init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
1796         cgc.sense = &sense;
1797         cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1798         cgc.cmd[1] = 2;
1799         cgc.cmd[2] = 4; /* READ ATIP */
1800         cgc.cmd[8] = 2;
1801         ret = pkt_generic_packet(pd, &cgc);
1802         if (ret) {
1803                 pkt_dump_sense(&cgc);
1804                 return ret;
1805         }
1806         size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
1807         if (size > sizeof(buf))
1808                 size = sizeof(buf);
1809
1810         init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
1811         cgc.sense = &sense;
1812         cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1813         cgc.cmd[1] = 2;
1814         cgc.cmd[2] = 4;
1815         cgc.cmd[8] = size;
1816         ret = pkt_generic_packet(pd, &cgc);
1817         if (ret) {
1818                 pkt_dump_sense(&cgc);
1819                 return ret;
1820         }
1821
1822         if (!buf[6] & 0x40) {
1823                 printk("pktcdvd: Disc type is not CD-RW\n");
1824                 return 1;
1825         }
1826         if (!buf[6] & 0x4) {
1827                 printk("pktcdvd: A1 values on media are not valid, maybe not CDRW?\n");
1828                 return 1;
1829         }
1830
1831         st = (buf[6] >> 3) & 0x7; /* disc sub-type */
1832
1833         sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
1834
1835         /* Info from cdrecord */
1836         switch (st) {
1837                 case 0: /* standard speed */
1838                         *speed = clv_to_speed[sp];
1839                         break;
1840                 case 1: /* high speed */
1841                         *speed = hs_clv_to_speed[sp];
1842                         break;
1843                 case 2: /* ultra high speed */
1844                         *speed = us_clv_to_speed[sp];
1845                         break;
1846                 default:
1847                         printk("pktcdvd: Unknown disc sub-type %d\n",st);
1848                         return 1;
1849         }
1850         if (*speed) {
1851                 printk("pktcdvd: Max. media speed: %d\n",*speed);
1852                 return 0;
1853         } else {
1854                 printk("pktcdvd: Unknown speed %d for sub-type %d\n",sp,st);
1855                 return 1;
1856         }
1857 }
1858
1859 static int pkt_perform_opc(struct pktcdvd_device *pd)
1860 {
1861         struct packet_command cgc;
1862         struct request_sense sense;
1863         int ret;
1864
1865         VPRINTK("pktcdvd: Performing OPC\n");
1866
1867         init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1868         cgc.sense = &sense;
1869         cgc.timeout = 60*HZ;
1870         cgc.cmd[0] = GPCMD_SEND_OPC;
1871         cgc.cmd[1] = 1;
1872         if ((ret = pkt_generic_packet(pd, &cgc)))
1873                 pkt_dump_sense(&cgc);
1874         return ret;
1875 }
1876
/*
 * Prepare drive and media for writing: probe settings, save write
 * parameters, enable write caching, choose a write speed bounded by
 * both drive and media capability, and finish with OPC.  Returns 0 on
 * success or -EIO on a fatal setup failure.
 */
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		DPRINTK("pktcdvd: %s failed probe\n", pd->name);
		return -EIO;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
		return -EIO;
	}

	/* best effort - a failure here is deliberately ignored */
	pkt_write_caching(pd, USE_WCACHING);

	/* fall back to 16x if the drive won't report its max write speed
	 * (speeds here are in kB/s; 1x CD is taken as 177 kB/s) */
	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
			DPRINTK("pktcdvd: write speed %ukB/s\n", write_speed);
			break;
		default:
			/* CD media: also honour the media speed from ATIP */
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			DPRINTK("pktcdvd: write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	/* OPC failure is logged but does not abort the open */
	if ((ret = pkt_perform_opc(pd))) {
		DPRINTK("pktcdvd: %s Optimum Power Calibration failed\n", pd->name);
	}

	return 0;
}
1923
1924 /*
1925  * called at open time.
1926  */
1927 static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1928 {
1929         int ret;
1930         long lba;
1931         request_queue_t *q;
1932
1933         /*
1934          * We need to re-open the cdrom device without O_NONBLOCK to be able
1935          * to read/write from/to it. It is already opened in O_NONBLOCK mode
1936          * so bdget() can't fail.
1937          */
1938         bdget(pd->bdev->bd_dev);
1939         if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
1940                 goto out;
1941
1942         if ((ret = pkt_get_last_written(pd, &lba))) {
1943                 printk("pktcdvd: pkt_get_last_written failed\n");
1944                 goto out_putdev;
1945         }
1946
1947         set_capacity(pd->disk, lba << 2);
1948         set_capacity(pd->bdev->bd_disk, lba << 2);
1949         bd_set_size(pd->bdev, (loff_t)lba << 11);
1950
1951         q = bdev_get_queue(pd->bdev);
1952         if (write) {
1953                 if ((ret = pkt_open_write(pd)))
1954                         goto out_putdev;
1955                 /*
1956                  * Some CDRW drives can not handle writes larger than one packet,
1957                  * even if the size is a multiple of the packet size.
1958                  */
1959                 spin_lock_irq(q->queue_lock);
1960                 blk_queue_max_sectors(q, pd->settings.size);
1961                 spin_unlock_irq(q->queue_lock);
1962                 set_bit(PACKET_WRITABLE, &pd->flags);
1963         } else {
1964                 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
1965                 clear_bit(PACKET_WRITABLE, &pd->flags);
1966         }
1967
1968         if ((ret = pkt_set_segment_merging(pd, q)))
1969                 goto out_putdev;
1970
1971         if (write)
1972                 printk("pktcdvd: %lukB available on disc\n", lba << 1);
1973
1974         return 0;
1975
1976 out_putdev:
1977         blkdev_put(pd->bdev);
1978 out:
1979         return ret;
1980 }
1981
1982 /*
1983  * called when the device is closed. makes sure that the device flushes
1984  * the internal cache before we close.
1985  */
1986 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
1987 {
1988         if (flush && pkt_flush_cache(pd))
1989                 DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
1990
1991         pkt_lock_door(pd, 0);
1992
1993         pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
1994         blkdev_put(pd->bdev);
1995 }
1996
1997 static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
1998 {
1999         if (dev_minor >= MAX_WRITERS)
2000                 return NULL;
2001         return pkt_devs[dev_minor];
2002 }
2003
/*
 * Block device open: take a reference on the writer; the first opener
 * also opens and configures the underlying CD device.  Serialized with
 * setup/teardown via ctl_mutex.
 */
static int pkt_open(struct inode *inode, struct file *file)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	VPRINTK("pktcdvd: entering open\n");

	down(&ctl_mutex);
	pd = pkt_find_dev_from_minor(iminor(inode));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt == 1) {
		/* first opener: writable iff opened with FMODE_WRITE */
		if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
			ret = -EIO;
			goto out_dec;
		}
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(inode->i_bdev, CD_FRAMESIZE);
	}

	up(&ctl_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	VPRINTK("pktcdvd: failed open (%d)\n", ret);
	up(&ctl_mutex);
	return ret;
}
2042
2043 static int pkt_close(struct inode *inode, struct file *file)
2044 {
2045         struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
2046         int ret = 0;
2047
2048         down(&ctl_mutex);
2049         pd->refcnt--;
2050         BUG_ON(pd->refcnt < 0);
2051         if (pd->refcnt == 0) {
2052                 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2053                 pkt_release_dev(pd, flush);
2054         }
2055         up(&ctl_mutex);
2056         return ret;
2057 }
2058
2059
2060 static void *psd_pool_alloc(int gfp_mask, void *data)
2061 {
2062         return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
2063 }
2064
/* mempool destructor: counterpart of psd_pool_alloc() */
static void psd_pool_free(void *ptr, void *data)
{
	kfree(ptr);
}
2069
/*
 * Completion handler for the cloned READ bios issued by
 * pkt_make_request(): once the clone is fully done, complete the
 * original bio, free the stacking data and update pending-bio
 * accounting.
 */
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	/* partial completion - wait until the whole bio is done */
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	bio_endio(psd->bio, psd->bio->bi_size, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
	return 0;
}
2084
/*
 * Custom make_request function.  READs are cloned and forwarded to the
 * underlying CD device; WRITEs are validated, split on packet (zone)
 * boundaries, and either merged into a live packet_data object or
 * queued in the rb-tree for the kcdrwd worker thread.
 */
static int pkt_make_request(request_queue_t *q, struct bio *bio)
{
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	pd = q->queuedata;
	if (!pd) {
		printk("pktcdvd: %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

		psd->pd = pd;
		psd->bio = bio;
		cloned_bio->bi_bdev = pd->bdev;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio->bi_size >> 9;
		pkt_queue_bio(pd, cloned_bio, 1);
		return 0;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		printk("pktcdvd: WRITE for ro device %s (%llu)\n",
			pd->name, (unsigned long long)bio->bi_sector);
		goto end_io;
	}

	/* writes must be non-empty and a whole number of CD frames */
	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
		printk("pktcdvd: wrong bio size\n");
		goto end_io;
	}

	blk_queue_bounce(q, &bio);

	zone = ZONE(bio->bi_sector, pd);
	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));

	/* Check if we have to split the bio */
	{
		struct bio_pair *bp;
		sector_t last_zone;
		int first_sectors;

		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
		if (last_zone != zone) {
			/* a bio can span at most two adjacent zones; split it
			 * at the boundary and handle each half recursively */
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
			bp = bio_split(bio, bio_split_pool, first_sectors);
			BUG_ON(!bp);
			pkt_make_request(q, &bp->bio1);
			pkt_make_request(q, &bp->bio2);
			bio_pair_release(bp);
			return 0;
		}
	}

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				pkt_add_list_last(bio, &pkt->orig_bios,
						  &pkt->orig_bios_tail);
				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					/* packet is now full - kick the worker */
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return 0;
			} else {
				/* zone busy in another state - must queue */
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	BUG_ON(!node);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return 0;
end_io:
	bio_io_error(bio, bio->bi_size);
	return 0;
}
2215
2216
2217
2218 static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
2219 {
2220         struct pktcdvd_device *pd = q->queuedata;
2221         sector_t zone = ZONE(bio->bi_sector, pd);
2222         int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
2223         int remaining = (pd->settings.size << 9) - used;
2224         int remaining2;
2225
2226         /*
2227          * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
2228          * boundary, pkt_make_request() will split the bio.
2229          */
2230         remaining2 = PAGE_SIZE - bio->bi_size;
2231         remaining = max(remaining, remaining2);
2232
2233         BUG_ON(remaining < 0);
2234         return remaining;
2235 }
2236
2237 static void pkt_init_queue(struct pktcdvd_device *pd)
2238 {
2239         request_queue_t *q = pd->disk->queue;
2240
2241         blk_queue_make_request(q, pkt_make_request);
2242         blk_queue_hardsect_size(q, CD_FRAMESIZE);
2243         blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
2244         blk_queue_merge_bvec(q, pkt_merge_bvec);
2245         q->queuedata = pd;
2246 }
2247
/*
 * seq_file show routine for /proc/driver/pktcdvd/<name>: dumps the
 * writer's settings, I/O statistics and queue state.
 */
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	/* settings.size is in 512-byte sectors, hence /2 for kB */
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	/* stats counters are in 512-byte sectors; >>1 converts to kB */
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	return 0;
}
2306
/* /proc open entry point: bind pkt_seq_show to this writer's pd */
static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE(inode)->data);
}
2311
/* file operations for the per-writer /proc entry */
static struct file_operations pkt_proc_fops = {
	.open	= pkt_seq_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = single_release
};
2318
/*
 * Attach a pktcdvd writer to the CD-ROM device @dev: reject recursive
 * or duplicate mappings, open the device, allocate packet buffers,
 * start the kcdrwd worker thread and create the /proc entry.  On
 * failure everything acquired so far is unwound.
 */
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct proc_dir_entry *proc;
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		printk("pktcdvd: Recursive setup not allowed\n");
		return -EBUSY;
	}
	/* refuse mapping a device that is already used by another writer,
	 * and refuse stacking one pktcdvd device on top of another */
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			printk("pktcdvd: %s already setup\n", bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			printk("pktcdvd: Can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ, O_RDONLY | O_NONBLOCK);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
		printk("pktcdvd: not enough memory for buffers\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		printk("pktcdvd: can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_thread;
	}

	/* /proc entry is optional - failure here is not fatal */
	proc = create_proc_entry(pd->name, 0, pkt_proc);
	if (proc) {
		proc->data = pd;
		proc->proc_fops = &pkt_proc_fops;
	}
	DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
	return 0;

out_thread:
	pkt_shrink_pktlist(pd);
out_mem:
	blkdev_put(bdev);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}
2390
/*
 * ioctl handler for the pktcdvd block device: forwards a small set of
 * CD-ROM ioctls to the underlying device and rejects everything else.
 */
static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;

	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
	BUG_ON(!pd);

	switch (cmd) {
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		return ioctl_by_bdev(pd->bdev, cmd, arg);

	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		pkt_lock_door(pd, 0);
		return ioctl_by_bdev(pd->bdev, cmd, arg);

	default:
		printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
		return -ENOTTY;
	}

	return 0;	/* not reached - every case above returns */
}
2424
2425 static int pkt_media_changed(struct gendisk *disk)
2426 {
2427         struct pktcdvd_device *pd = disk->private_data;
2428         struct gendisk *attached_disk;
2429
2430         if (!pd)
2431                 return 0;
2432         if (!pd->bdev)
2433                 return 0;
2434         attached_disk = pd->bdev->bd_disk;
2435         if (!attached_disk)
2436                 return 0;
2437         return attached_disk->fops->media_changed(attached_disk);
2438 }
2439
/* block device operations for the pktcdvd disk */
static struct block_device_operations pktcdvd_ops = {
	.owner =		THIS_MODULE,
	.open =			pkt_open,
	.release =		pkt_close,
	.ioctl =		pkt_ioctl,
	.media_changed =	pkt_media_changed,
};
2447
2448 /*
2449  * Set up mapping from pktcdvd device to CD-ROM device.
2450  */
2451 static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2452 {
2453         int idx;
2454         int ret = -ENOMEM;
2455         struct pktcdvd_device *pd;
2456         struct gendisk *disk;
2457         dev_t dev = new_decode_dev(ctrl_cmd->dev);
2458
2459         for (idx = 0; idx < MAX_WRITERS; idx++)
2460                 if (!pkt_devs[idx])
2461                         break;
2462         if (idx == MAX_WRITERS) {
2463                 printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
2464                 return -EBUSY;
2465         }
2466
2467         pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2468         if (!pd)
2469                 return ret;
2470         memset(pd, 0, sizeof(struct pktcdvd_device));
2471
2472         pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
2473         if (!pd->rb_pool)
2474                 goto out_mem;
2475
2476         disk = alloc_disk(1);
2477         if (!disk)
2478                 goto out_mem;
2479         pd->disk = disk;
2480
2481         spin_lock_init(&pd->lock);
2482         spin_lock_init(&pd->iosched.lock);
2483         sprintf(pd->name, "pktcdvd%d", idx);
2484         init_waitqueue_head(&pd->wqueue);
2485         pd->bio_queue = RB_ROOT;
2486
2487         disk->major = pkt_major;
2488         disk->first_minor = idx;
2489         disk->fops = &pktcdvd_ops;
2490         disk->flags = GENHD_FL_REMOVABLE;
2491         sprintf(disk->disk_name, "pktcdvd%d", idx);
2492         disk->private_data = pd;
2493         disk->queue = blk_alloc_queue(GFP_KERNEL);
2494         if (!disk->queue)
2495                 goto out_mem2;
2496
2497         pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
2498         ret = pkt_new_dev(pd, dev);
2499         if (ret)
2500                 goto out_new_dev;
2501
2502         add_disk(disk);
2503         pkt_devs[idx] = pd;
2504         ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2505         return 0;
2506
2507 out_new_dev:
2508         blk_put_queue(disk->queue);
2509 out_mem2:
2510         put_disk(disk);
2511 out_mem:
2512         if (pd->rb_pool)
2513                 mempool_destroy(pd->rb_pool);
2514         kfree(pd);
2515         return ret;
2516 }
2517
2518 /*
2519  * Tear down mapping from pktcdvd device to CD-ROM device.
2520  */
2521 static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
2522 {
2523         struct pktcdvd_device *pd;
2524         int idx;
2525         dev_t pkt_dev = new_decode_dev(ctrl_cmd->pkt_dev);
2526
2527         for (idx = 0; idx < MAX_WRITERS; idx++) {
2528                 pd = pkt_devs[idx];
2529                 if (pd && (pd->pkt_dev == pkt_dev))
2530                         break;
2531         }
2532         if (idx == MAX_WRITERS) {
2533                 DPRINTK("pktcdvd: dev not setup\n");
2534                 return -ENXIO;
2535         }
2536
2537         if (pd->refcnt > 0)
2538                 return -EBUSY;
2539
2540         if (!IS_ERR(pd->cdrw.thread))
2541                 kthread_stop(pd->cdrw.thread);
2542
2543         blkdev_put(pd->bdev);
2544
2545         pkt_shrink_pktlist(pd);
2546
2547         remove_proc_entry(pd->name, pkt_proc);
2548         DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
2549
2550         del_gendisk(pd->disk);
2551         blk_put_queue(pd->disk->queue);
2552         put_disk(pd->disk);
2553
2554         pkt_devs[idx] = NULL;
2555         mempool_destroy(pd->rb_pool);
2556         kfree(pd);
2557
2558         /* This is safe: open() is still holding a reference. */
2559         module_put(THIS_MODULE);
2560         return 0;
2561 }
2562
2563 static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2564 {
2565         struct pktcdvd_device *pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2566         if (pd) {
2567                 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2568                 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2569         } else {
2570                 ctrl_cmd->dev = 0;
2571                 ctrl_cmd->pkt_dev = 0;
2572         }
2573         ctrl_cmd->num_devices = MAX_WRITERS;
2574 }
2575
/*
 * ioctl handler for the pktcdvd control device: dispatches setup,
 * teardown and status requests, all serialized by ctl_mutex.  Setup
 * and teardown require CAP_SYS_ADMIN; the (possibly updated) command
 * struct is copied back to userspace on success.
 */
static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		down(&ctl_mutex);
		ret = pkt_setup_dev(&ctrl_cmd);
		up(&ctl_mutex);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		down(&ctl_mutex);
		ret = pkt_remove_dev(&ctrl_cmd);
		up(&ctl_mutex);
		break;
	case PKT_CTRL_CMD_STATUS:
		/* status is read-only, no capability check needed */
		down(&ctl_mutex);
		pkt_get_status(&ctrl_cmd);
		up(&ctl_mutex);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}
2616
2617
2618 static struct file_operations pkt_ctl_fops = {
2619         .ioctl   = pkt_ctl_ioctl,
2620         .owner   = THIS_MODULE,
2621 };
2622
/*
 * Misc character device through which userspace (pktsetup) issues
 * PACKET_CTRL_CMD ioctls to map/unmap writers and query status.
 */
static struct miscdevice pkt_misc = {
	.minor          = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name           = "pktcdvd",
	.devfs_name     = "pktcdvd/control",
	.fops           = &pkt_ctl_fops
};
2629
2630 int pkt_init(void)
2631 {
2632         int ret;
2633
2634         psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
2635         if (!psd_pool)
2636                 return -ENOMEM;
2637
2638         ret = register_blkdev(pkt_major, "pktcdvd");
2639         if (ret < 0) {
2640                 printk("pktcdvd: Unable to register block device\n");
2641                 goto out2;
2642         }
2643         if (!pkt_major)
2644                 pkt_major = ret;
2645
2646         ret = misc_register(&pkt_misc);
2647         if (ret) {
2648                 printk("pktcdvd: Unable to register misc device\n");
2649                 goto out;
2650         }
2651
2652         init_MUTEX(&ctl_mutex);
2653
2654         pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
2655
2656         DPRINTK("pktcdvd: %s\n", VERSION_CODE);
2657         return 0;
2658
2659 out:
2660         unregister_blkdev(pkt_major, "pktcdvd");
2661 out2:
2662         mempool_destroy(psd_pool);
2663         return ret;
2664 }
2665
/*
 * Module teardown: undo pkt_init() in reverse order — remove the proc
 * directory, unregister the control device and block major, then free
 * the bio mempool.
 */
void pkt_exit(void)
{
	remove_proc_entry("pktcdvd", proc_root_driver);
	misc_deregister(&pkt_misc);
	unregister_blkdev(pkt_major, "pktcdvd");
	mempool_destroy(psd_pool);
}
2673
/* Module metadata and init/exit entry points. */
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);