* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
- * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
- * DVD-RW devices (aka an exercise in block layer masturbation)
+ * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
+ * DVD-RAM devices.
*
+ * Theory of operation:
*
- * TODO: (circa order of when I will fix it)
- * - Only able to write on CD-RW media right now.
- * - check host application code on media and set it in write page
- * - interface for UDF <-> packet to negotiate a new location when a write
- * fails.
- * - handle OPC, especially for -RW media
+ * At the lowest level, there is the standard driver for the CD/DVD device,
+ * typically ide-cd.c or sr.c. This driver can handle read and write requests,
+ * but it doesn't know anything about the special restrictions that apply to
+ * packet writing. One restriction is that write requests must be aligned to
+ * packet boundaries on the physical media, and the size of a write request
+ * must be equal to the packet size. Another restriction is that a
+ * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
+ * command, if the previous command was a write.
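+ *
+ * For example, with the 32-frame (64kB) fixed packet size commonly used on
+ * CD-RW media, a 4kB write coming from the file system has to be turned
+ * into a single 64kB write that starts on a 64kB boundary on the disc.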
*
- * Theory of operation:
+ * The purpose of the packet writing driver is to hide these restrictions from
+ * higher layers, such as file systems, and present a block device that can be
+ * randomly read and written using 2kB-sized blocks.
*
- * We use a custom make_request_fn function that forwards reads directly to
- * the underlying CD device. Write requests are either attached directly to
- * a live packet_data object, or simply stored sequentially in a list for
- * later processing by the kcdrwd kernel thread. This driver doesn't use
- * any elevator functionally as defined by the elevator_s struct, but the
- * underlying CD device uses a standard elevator.
+ * The lowest layer in the packet writing driver is the packet I/O scheduler.
+ * Its data is defined by the struct packet_iosched and includes two bio
+ * queues with pending read and write requests. These queues are processed
+ * by the pkt_iosched_process_queue() function. The write requests in this
+ * queue are already properly aligned and sized. This layer is responsible for
+ * issuing the flush cache commands and scheduling the I/O in an efficient order.
*
- * This strategy makes it possible to do very late merging of IO requests.
- * A new bio sent to pkt_make_request can be merged with a live packet_data
- * object even if the object is in the data gathering state.
+ * The next layer transforms unaligned write requests to aligned writes. This
+ * transformation requires reading missing pieces of data from the underlying
+ * block device, assembling the pieces into full packets and queuing them to the
+ * packet I/O scheduler.
+ *
+ * At the top layer there is a custom make_request_fn function that forwards
+ * read requests directly to the iosched queue and puts write requests in the
+ * unaligned write queue. A kernel thread performs the necessary read
+ * gathering to convert the unaligned writes to aligned writes and then feeds
+ * them to the packet I/O scheduler.
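+ *
+ * The overall write path is thus roughly:
+ *   pkt_make_request() -> unaligned write queue -> kernel thread (read
+ *   gathering) -> packet I/O scheduler -> underlying CD/DVD driver.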
*
*************************************************************************/
-#define VERSION_CODE "v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
-
#include <linux/pktcdvd.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/suspend.h>
+#include <linux/mutex.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi.h>
#include <asm/uaccess.h>
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pkt_major;
-static struct semaphore ctl_mutex; /* Serialize open/close/setup/teardown */
+static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;
goto no_bio;
bio_init(bio);
- bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL);
+ bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
if (!bvl)
goto no_bvl;
- memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
/*
* Allocate a packet_data struct
*/
-static struct packet_data *pkt_alloc_packet_data(void)
+static struct packet_data *pkt_alloc_packet_data(int frames)
{
int i;
struct packet_data *pkt;
- pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
+ pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
if (!pkt)
goto no_pkt;
- memset(pkt, 0, sizeof(struct packet_data));
- pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
+ pkt->frames = frames;
+ pkt->w_bio = pkt_bio_alloc(frames);
if (!pkt->w_bio)
goto no_bio;
- for (i = 0; i < PAGES_PER_PACKET; i++) {
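+ /*
+  * Each page holds FRAMES_PER_PAGE frames (PAGE_SIZE / CD_FRAMESIZE, i.e.
+  * two 2kB frames per page on 4kB-page systems), so the packet payload
+  * only needs frames / FRAMES_PER_PAGE pages.
+  */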
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!pkt->pages[i])
goto no_page;
spin_lock_init(&pkt->lock);
- for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ for (i = 0; i < frames; i++) {
struct bio *bio = pkt_bio_alloc(1);
if (!bio)
goto no_rd_bio;
return pkt;
no_rd_bio:
- for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ for (i = 0; i < frames; i++) {
struct bio *bio = pkt->r_bios[i];
if (bio)
bio_put(bio);
}
no_page:
- for (i = 0; i < PAGES_PER_PACKET; i++)
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
if (pkt->pages[i])
__free_page(pkt->pages[i]);
bio_put(pkt->w_bio);
{
int i;
- for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ for (i = 0; i < pkt->frames; i++) {
struct bio *bio = pkt->r_bios[i];
if (bio)
bio_put(bio);
}
- for (i = 0; i < PAGES_PER_PACKET; i++)
+ for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
__free_page(pkt->pages[i]);
bio_put(pkt->w_bio);
kfree(pkt);
list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
pkt_free_packet_data(pkt);
}
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
struct packet_data *pkt;
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
- INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
- spin_lock_init(&pd->cdrw.active_list_lock);
+ BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
+
while (nr_packets > 0) {
- pkt = pkt_alloc_packet_data();
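+ /* pd->settings.size is in 512-byte sectors and a frame is 2kB, so
+  * size >> 2 is the number of frames in one packet. */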
+ pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
if (!pkt) {
pkt_shrink_pktlist(pd);
return 0;
return 1;
}
-static void *pkt_rb_alloc(int gfp_mask, void *data)
-{
- return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
-}
-
-static void pkt_rb_free(void *ptr, void *data)
-{
- kfree(ptr);
-}
-
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
struct rb_node *n = rb_next(&node->rb_node);
return rb_entry(n, struct pkt_rb_node, rb_node);
}
-static inline void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
rb_erase(&node->rb_node, &pd->bio_queue);
mempool_free(node, pd->rb_pool);
/*
* Add a bio to a single linked list defined by its head and tail pointers.
*/
-static inline void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
+static void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
{
bio->bi_next = NULL;
if (*list_tail) {
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
rq->ref_count++;
rq->flags |= REQ_NOMERGE;
rq->waiting = &wait;
+ rq->end_io = blk_end_sync_rq;
elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
generic_unplug_device(q);
wait_for_completion(&wait);
* Queue a bio for processing by the low-level CD device. Must be called
* from process context.
*/
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
spin_lock(&pd->iosched.lock);
if (bio_data_dir(bio) == READ) {
pkt_add_list_last(bio, &pd->iosched.read_queue,
&pd->iosched.read_queue_tail);
- if (high_prio_read)
- pd->iosched.high_prio_read = 1;
} else {
pkt_add_list_last(bio, &pd->iosched.write_queue,
&pd->iosched.write_queue_tail);
* requirements for CDRW drives:
* - A cache flush command must be inserted before a read request if the
* previous request was a write.
- * - Switching between reading and writing is slow, so don't it more often
+ * - Switching between reading and writing is slow, so don't do it more often
* than necessary.
+ * - Optimize for throughput at the expense of latency. This means that streaming
+ * writes will never be interrupted by a read, but if the drive has to seek
+ * before the next write, switch to reading instead if there are any pending
+ * read requests.
* - Set the read speed according to current usage pattern. When only reading
* from the device, it's best to use the highest possible read speed, but
* when switching often between reading and writing, it's better to have the
* same read and write speeds.
- * - Reads originating from user space should have higher priority than reads
- * originating from pkt_gather_data, because some process is usually waiting
- * on reads of the first kind.
*/
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
- request_queue_t *q;
if (atomic_read(&pd->iosched.attention) == 0)
return;
atomic_set(&pd->iosched.attention, 0);
- q = bdev_get_queue(pd->bdev);
-
for (;;) {
struct bio *bio;
- int reads_queued, writes_queued, high_prio_read;
+ int reads_queued, writes_queued;
spin_lock(&pd->iosched.lock);
reads_queued = (pd->iosched.read_queue != NULL);
writes_queued = (pd->iosched.write_queue != NULL);
- if (!reads_queued)
- pd->iosched.high_prio_read = 0;
- high_prio_read = pd->iosched.high_prio_read;
spin_unlock(&pd->iosched.lock);
if (!reads_queued && !writes_queued)
break;
if (pd->iosched.writing) {
- if (high_prio_read || (!writes_queued && reads_queued)) {
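+ /*
+  * Only interrupt a write stream with queued reads if the next
+  * write does not continue where the previous one ended, i.e. if
+  * the drive would have to seek before writing anyway.
+  */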
+ int need_write_seek = 1;
+ spin_lock(&pd->iosched.lock);
+ bio = pd->iosched.write_queue;
+ spin_unlock(&pd->iosched.lock);
+ if (bio && (bio->bi_sector == pd->iosched.last_write))
+ need_write_seek = 0;
+ if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
VPRINTK("pktcdvd: write, waiting\n");
break;
if (bio_data_dir(bio) == READ)
pd->iosched.successive_reads += bio->bi_size >> 10;
- else
+ else {
pd->iosched.successive_reads = 0;
+ pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+ }
if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
if (pd->read_speed == pd->write_speed) {
pd->read_speed = MAX_SPEED;
* b) The data can be used as cache to avoid read requests if we receive a
* new write request for the same zone.
*/
-static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets)
+static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
int f, p, offs;
p = 0;
offs = 0;
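+ /*
+  * Copy any frame that does not already reside in the packet's own pages
+  * into pkt->pages[]; p and offs track the destination page and offset.
+  */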
for (f = 0; f < pkt->frames; f++) {
- if (pages[f] != pkt->pages[p]) {
- void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f];
+ if (bvec[f].bv_page != pkt->pages[p]) {
+ void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
void *vto = page_address(pkt->pages[p]) + offs;
memcpy(vto, vfrom, CD_FRAMESIZE);
kunmap_atomic(vfrom, KM_USER0);
- pages[f] = pkt->pages[p];
- offsets[f] = offs;
+ bvec[f].bv_page = pkt->pages[p];
+ bvec[f].bv_offset = offs;
} else {
- BUG_ON(offsets[f] != offs);
+ BUG_ON(bvec[f].bv_offset != offs);
}
offs += CD_FRAMESIZE;
if (offs >= PAGE_SIZE) {
- BUG_ON(offs > PAGE_SIZE);
offs = 0;
p++;
}
atomic_set(&pkt->io_wait, 0);
atomic_set(&pkt->io_errors, 0);
- if (pkt->cache_valid) {
- VPRINTK("pkt_gather_data: zone %llx cached\n",
- (unsigned long long)pkt->sector);
- goto out_account;
- }
-
/*
* Figure out which frames we need to read before we can write.
*/
for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
int num_frames = bio->bi_size / CD_FRAMESIZE;
+ pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
BUG_ON(first_frame < 0);
BUG_ON(first_frame + num_frames > pkt->frames);
for (f = first_frame; f < first_frame + num_frames; f++)
}
spin_unlock(&pkt->lock);
+ if (pkt->cache_valid) {
+ VPRINTK("pkt_gather_data: zone %llx cached\n",
+ (unsigned long long)pkt->sector);
+ goto out_account;
+ }
+
/*
* Schedule reads for missing parts of the packet.
*/
atomic_inc(&pkt->io_wait);
bio->bi_rw = READ;
- pkt_queue_bio(pd, bio, 0);
+ pkt_queue_bio(pd, bio);
frames_read++;
}
frames_read, (unsigned long long)pkt->sector);
pd->stats.pkt_started++;
pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
- pd->stats.secs_w += pd->settings.size;
}
/*
list_del_init(&pkt->list);
if (pkt->sector != zone)
pkt->cache_valid = 0;
- break;
+ return pkt;
}
}
- return pkt;
+ BUG();
+ return NULL;
}
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = node->bio;
zone = ZONE(bio->bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
- if (p->sector == zone)
+ if (p->sector == zone) {
+ bio = NULL;
goto try_next_bio;
+ }
}
break;
try_next_bio:
}
pkt = pkt_get_packet_data(pd, zone);
- BUG_ON(!pkt);
pd->current_sector = zone + pd->settings.size;
pkt->sector = zone;
- pkt->frames = pd->settings.size >> 2;
- BUG_ON(pkt->frames > PACKET_MAX_SIZE);
+ BUG_ON(pkt->frames != pd->settings.size >> 2);
pkt->write_size = 0;
/*
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
struct bio *bio;
- struct page *pages[PACKET_MAX_SIZE];
- int offsets[PACKET_MAX_SIZE];
int f;
int frames_write;
+ struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
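+ /*
+  * Point the bvec entries at the packet's own pages to start with; frames
+  * that are fully covered by orig_bios are redirected to the originating
+  * pages further down.
+  */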
for (f = 0; f < pkt->frames; f++) {
- pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE;
+ bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+ bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
}
/*
- * Fill-in pages[] and offsets[] with data from orig_bios.
+ * Fill-in bvec with data from orig_bios.
*/
frames_write = 0;
spin_lock(&pkt->lock);
}
if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
- pages[f] = src_bvl->bv_page;
- offsets[f] = src_bvl->bv_offset + src_offs;
+ bvec[f].bv_page = src_bvl->bv_page;
+ bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
} else {
pkt_copy_bio_data(bio, segment, src_offs,
- pages[f], offsets[f]);
+ bvec[f].bv_page, bvec[f].bv_offset);
}
src_offs += CD_FRAMESIZE;
frames_write++;
BUG_ON(frames_write != pkt->write_size);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
- pkt_make_local_copy(pkt, pages, offsets);
+ pkt_make_local_copy(pkt, bvec);
pkt->cache_valid = 1;
} else {
pkt->cache_valid = 0;
pkt->w_bio->bi_bdev = pd->bdev;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
- for (f = 0; f < pkt->frames; f++) {
- if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) &&
- (offsets[f + 1] = offsets[f] + CD_FRAMESIZE)) {
- if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f]))
- BUG();
- f++;
- } else {
- if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f]))
- BUG();
- }
- }
+ for (f = 0; f < pkt->frames; f++)
+ if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+ BUG();
VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt);
atomic_set(&pkt->io_wait, 1);
pkt->w_bio->bi_rw = WRITE;
- pkt_queue_bio(pd, pkt->w_bio, 0);
+ pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
struct packet_data *pkt;
int i;
- for (i = 0; i <= PACKET_NUM_STATES; i++)
+ for (i = 0; i < PACKET_NUM_STATES; i++)
states[i] = 0;
spin_lock(&pd->cdrw.active_list_lock);
VPRINTK("kcdrwd: wake up\n");
/* make swsusp happy with our thread */
- if (current->flags & PF_FREEZE)
- refrigerator(PF_FREEZE);
+ try_to_freeze();
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (!pkt->sleep_time)
char buffer[128];
int ret, size;
- /* doesn't apply to DVD+RW */
- if (pd->mmc3_profile == 0x1a)
+ /* doesn't apply to DVD+RW or DVD-RAM */
+ if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
return 0;
memset(buffer, 0, sizeof(buffer));
}
/*
- * 0 -- we can write to this track, 1 -- we can't
+ * 1 -- we can write to this track, 0 -- we can't
*/
-static int pkt_good_track(track_information *ti)
+static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
- /*
- * only good for CD-RW at the moment, not DVD-RW
- */
+ switch (pd->mmc3_profile) {
+ case 0x1a: /* DVD+RW */
+ case 0x12: /* DVD-RAM */
+ /* The track is always writable on DVD+RW/DVD-RAM */
+ return 1;
+ default:
+ break;
+ }
- /*
- * FIXME: only for FP
- */
- if (ti->fp == 0)
+ if (!ti->packet || !ti->fp)
return 0;
/*
* "good" settings as per Mt Fuji.
*/
- if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
- return 0;
+ if (ti->rt == 0 && ti->blank == 0)
+ return 1;
- if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
- return 0;
+ if (ti->rt == 0 && ti->blank == 1)
+ return 1;
- if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
- return 0;
+ if (ti->rt == 1 && ti->blank == 0)
+ return 1;
printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
- return 1;
+ return 0;
}
/*
- * 0 -- we can write to this disc, 1 -- we can't
+ * 1 -- we can write to this disc, 0 -- we can't
*/
-static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
+static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
switch (pd->mmc3_profile) {
case 0x0a: /* CD-RW */
break;
case 0x1a: /* DVD+RW */
case 0x13: /* DVD-RW */
- return 0;
- default:
- printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
+ case 0x12: /* DVD-RAM */
return 1;
+ default:
+ VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
+ return 0;
}
/*
*/
if (di->disc_type == 0xff) {
printk("pktcdvd: Unknown disc. No track?\n");
- return 1;
+ return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
- return 1;
+ return 0;
}
if (di->erasable == 0) {
printk("pktcdvd: Disc not erasable\n");
- return 1;
+ return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
printk("pktcdvd: Can't write to last track (reserved)\n");
- return 1;
+ return 0;
}
- return 0;
+ return 1;
}
static int pkt_probe_settings(struct pktcdvd_device *pd)
return ret;
}
- if (pkt_good_disc(pd, &di))
- return -ENXIO;
+ if (!pkt_writable_disc(pd, &di))
+ return -EROFS;
- switch (pd->mmc3_profile) {
- case 0x1a: /* DVD+RW */
- printk("pktcdvd: inserted media is DVD+RW\n");
- break;
- case 0x13: /* DVD-RW */
- printk("pktcdvd: inserted media is DVD-RW\n");
- break;
- default:
- printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
- break;
- }
pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
return ret;
}
- if (pkt_good_track(&ti)) {
+ if (!pkt_writable_track(pd, &ti)) {
printk("pktcdvd: can't write to this track\n");
- return -ENXIO;
+ return -EROFS;
}
/*
pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
if (pd->settings.size == 0) {
printk("pktcdvd: detected zero packet size!\n");
- pd->settings.size = 128;
+ return -ENXIO;
+ }
+ if (pd->settings.size > PACKET_MAX_SECTORS) {
+ printk("pktcdvd: packet size is too big\n");
+ return -EROFS;
}
pd->settings.fp = ti.fp;
pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
break;
default:
printk("pktcdvd: unknown data mode\n");
- return 1;
+ return -EROFS;
}
return 0;
}
unsigned int write_speed, media_write_speed, read_speed;
if ((ret = pkt_probe_settings(pd))) {
- DPRINTK("pktcdvd: %s failed probe\n", pd->name);
- return -EIO;
+ VPRINTK("pktcdvd: %s failed probe\n", pd->name);
+ return ret;
}
if ((ret = pkt_set_write_settings(pd))) {
switch (pd->mmc3_profile) {
case 0x13: /* DVD-RW */
case 0x1a: /* DVD+RW */
+ case 0x12: /* DVD-RAM */
DPRINTK("pktcdvd: write speed %ukB/s\n", write_speed);
break;
default:
if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
goto out;
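+ /*
+  * Claim the device so that it cannot simultaneously be claimed by
+  * another exclusive user while the packet writer is using it.
+  */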
+ if ((ret = bd_claim(pd->bdev, pd)))
+ goto out_putdev;
+
if ((ret = pkt_get_last_written(pd, &lba))) {
printk("pktcdvd: pkt_get_last_written failed\n");
- goto out_putdev;
+ goto out_unclaim;
}
set_capacity(pd->disk, lba << 2);
q = bdev_get_queue(pd->bdev);
if (write) {
if ((ret = pkt_open_write(pd)))
- goto out_putdev;
+ goto out_unclaim;
/*
* Some CDRW drives can not handle writes larger than one packet,
* even if the size is a multiple of the packet size.
}
if ((ret = pkt_set_segment_merging(pd, q)))
- goto out_putdev;
+ goto out_unclaim;
- if (write)
+ if (write) {
+ if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+ printk("pktcdvd: not enough memory for buffers\n");
+ ret = -ENOMEM;
+ goto out_unclaim;
+ }
printk("pktcdvd: %lukB available on disc\n", lba << 1);
+ }
return 0;
+out_unclaim:
+ bd_release(pd->bdev);
out_putdev:
blkdev_put(pd->bdev);
out:
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+ bd_release(pd->bdev);
blkdev_put(pd->bdev);
+
+ pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
VPRINTK("pktcdvd: entering open\n");
- down(&ctl_mutex);
+ mutex_lock(&ctl_mutex);
pd = pkt_find_dev_from_minor(iminor(inode));
if (!pd) {
ret = -ENODEV;
BUG_ON(pd->refcnt < 0);
pd->refcnt++;
- if (pd->refcnt == 1) {
- if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
- ret = -EIO;
+ if (pd->refcnt > 1) {
+ if ((file->f_mode & FMODE_WRITE) &&
+ !test_bit(PACKET_WRITABLE, &pd->flags)) {
+ ret = -EBUSY;
goto out_dec;
}
+ } else {
+ ret = pkt_open_dev(pd, file->f_mode & FMODE_WRITE);
+ if (ret)
+ goto out_dec;
/*
* needed here as well, since ext2 (among others) may change
* the blocksize at mount time
set_blocksize(inode->i_bdev, CD_FRAMESIZE);
}
- up(&ctl_mutex);
+ mutex_unlock(&ctl_mutex);
return 0;
out_dec:
pd->refcnt--;
out:
VPRINTK("pktcdvd: failed open (%d)\n", ret);
- up(&ctl_mutex);
+ mutex_unlock(&ctl_mutex);
return ret;
}
struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
int ret = 0;
- down(&ctl_mutex);
+ mutex_lock(&ctl_mutex);
pd->refcnt--;
BUG_ON(pd->refcnt < 0);
if (pd->refcnt == 0) {
int flush = test_bit(PACKET_WRITABLE, &pd->flags);
pkt_release_dev(pd, flush);
}
- up(&ctl_mutex);
+ mutex_unlock(&ctl_mutex);
return ret;
}
-static void *psd_pool_alloc(int gfp_mask, void *data)
-{
- return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
-}
-
-static void psd_pool_free(void *ptr, void *data)
-{
- kfree(ptr);
-}
-
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
{
struct packet_stacked_data *psd = bio->bi_private;
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio->bi_size >> 9;
- pkt_queue_bio(pd, cloned_bio, 1);
+ pkt_queue_bio(pd, cloned_bio);
return 0;
}
* No matching packet found. Store the bio in the work queue.
*/
node = mempool_alloc(pd->rb_pool, GFP_NOIO);
- BUG_ON(!node);
node->bio = bio;
spin_lock(&pd->lock);
BUG_ON(pd->bio_queue_size < 0);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- printk("pktcdvd: not enough memory for buffers\n");
- ret = -ENOMEM;
- goto out_mem;
- }
-
pd->bdev = bdev;
set_blocksize(bdev, CD_FRAMESIZE);
if (IS_ERR(pd->cdrw.thread)) {
printk("pktcdvd: can't start kernel thread\n");
ret = -ENOMEM;
- goto out_thread;
+ goto out_mem;
}
proc = create_proc_entry(pd->name, 0, pkt_proc);
DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
return 0;
-out_thread:
- pkt_shrink_pktlist(pd);
out_mem:
blkdev_put(bdev);
/* This is safe: open() is still holding a reference. */
struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
- BUG_ON(!pd);
switch (cmd) {
/*
* The door gets locked when the device is opened, so we
* have to unlock it or else the eject command fails.
*/
- pkt_lock_door(pd, 0);
+ if (pd->refcnt == 1)
+ pkt_lock_door(pd, 0);
return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
default:
- printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
+ VPRINTK("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
return -ENOTTY;
}
return -EBUSY;
}
- pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
+ pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
if (!pd)
return ret;
- memset(pd, 0, sizeof(struct pktcdvd_device));
- pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
+ pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
+ sizeof(struct pkt_rb_node));
if (!pd->rb_pool)
goto out_mem;
goto out_mem;
pd->disk = disk;
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+ INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+ spin_lock_init(&pd->cdrw.active_list_lock);
+
spin_lock_init(&pd->lock);
spin_lock_init(&pd->iosched.lock);
sprintf(pd->name, "pktcdvd%d", idx);
return 0;
out_new_dev:
- blk_put_queue(disk->queue);
+ blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
blkdev_put(pd->bdev);
- pkt_shrink_pktlist(pd);
-
remove_proc_entry(pd->name, pkt_proc);
DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
del_gendisk(pd->disk);
- blk_put_queue(pd->disk->queue);
+ blk_cleanup_queue(pd->disk->queue);
put_disk(pd->disk);
pkt_devs[idx] = NULL;
case PKT_CTRL_CMD_SETUP:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- down(&ctl_mutex);
+ mutex_lock(&ctl_mutex);
ret = pkt_setup_dev(&ctrl_cmd);
- up(&ctl_mutex);
+ mutex_unlock(&ctl_mutex);
break;
case PKT_CTRL_CMD_TEARDOWN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- down(&ctl_mutex);
+ mutex_lock(&ctl_mutex);
ret = pkt_remove_dev(&ctrl_cmd);
- up(&ctl_mutex);
+ mutex_unlock(&ctl_mutex);
break;
case PKT_CTRL_CMD_STATUS:
- down(&ctl_mutex);
+ mutex_lock(&ctl_mutex);
pkt_get_status(&ctrl_cmd);
- up(&ctl_mutex);
+ mutex_unlock(&ctl_mutex);
break;
default:
return -ENOTTY;
.fops = &pkt_ctl_fops
};
-static int pkt_init(void)
+static int __init pkt_init(void)
{
int ret;
- psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
+ psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
+ sizeof(struct packet_stacked_data));
if (!psd_pool)
return -ENOMEM;
goto out;
}
- init_MUTEX(&ctl_mutex);
+ mutex_init(&ctl_mutex);
pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
- DPRINTK("pktcdvd: %s\n", VERSION_CODE);
return 0;
out:
return ret;
}
-static void pkt_exit(void)
+static void __exit pkt_exit(void)
{
remove_proc_entry("pktcdvd", proc_root_driver);
misc_deregister(&pkt_misc);