X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fblock%2Fub.c;h=2098eff91e14721641f81c47b3c510481fbb6bcc;hb=refs%2Fheads%2Fvserver;hp=acea9f08ea4475cc305445f64f05f64f542fd3e0;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index acea9f08e..2098eff91 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -8,49 +8,106 @@
  * and is not licensed separately. See file COPYING for details.
  *
  * TODO (sorted by decreasing priority)
- * -- Do resets with usb_device_reset (needs a thread context, use khubd)
  * -- set readonly flag for CDs, set removable flag for CF readers
  * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
- * -- support pphaneuf's SDDR-75 with two LUNs (also broken capacity...)
- * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
  * -- verify the 13 conditions and do bulk resets
- * -- normal pool of commands instead of cmdv[]?
- * -- kill last_pipe and simply do two-state clearing on both pipes
- * -- verify protocol (bulk) from USB descriptors (maybe...)
- * -- highmem and sg
+ * -- highmem
  * -- move top_sense and work_bcs into separate allocations (if they survive)
  *    for cache purists and esoteric architectures.
+ * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
  * -- prune comments, they are too voluminous
- * -- Exterminate P3 printks
  * -- Resolve XXX's
- * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
+ * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/usb.h>
+#include <linux/usb_usual.h>
 #include <linux/blkdev.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/timer.h>
 #include <scsi/scsi.h>

 #define DRV_NAME "ub"
-#define DEVFS_NAME DRV_NAME

 #define UB_MAJOR 180

 /*
- * Definitions which have to be scattered once we understand the layout better.
+ * The command state machine is the key model for understanding of this driver.
+ *
+ * The general rule is that all transitions are done towards the bottom
+ * of the diagram, thus preventing any loops.
+ *
+ * An exception to that is how the STAT state is handled. A counter allows it
+ * to be re-entered along the path marked with [C].
+ *
+ * +--------+
+ * ! INIT !
+ * +--------+
+ * !
+ * ub_scsi_cmd_start fails ->--------------------------------------\
+ * ! !
+ * V !
+ * +--------+ !
+ * ! CMD ! !
+ * +--------+ !
+ * ! +--------+ !
+ * was -EPIPE -->-------------------------------->! CLEAR ! !
+ * ! +--------+ !
+ * ! ! !
+ * was error -->------------------------------------- ! --------->\
+ * ! ! !
+ * /--<-- cmd->dir == NONE ? ! !
+ * ! ! ! !
+ * ! V ! !
+ * ! +--------+ ! !
+ * ! ! DATA ! ! !
+ * ! +--------+ ! !
+ * ! ! +---------+ ! !
+ * ! was -EPIPE -->--------------->! CLR2STS ! ! !
+ * ! ! +---------+ ! !
+ * ! ! ! ! !
+ * ! ! was error -->---- ! --------->\
+ * ! was error -->--------------------- ! ------------- ! --------->\
+ * ! ! ! ! !
+ * ! V ! ! !
+ * \--->+--------+ ! ! !
+ * ! STAT !<--------------------------/ ! !
+ * /--->+--------+ ! !
+ * ! ! ! !
+ * [C] was -EPIPE -->-----------\ ! !
+ * ! ! ! ! !
+ * +<---- len == 0 ! ! !
+ * ! ! ! ! !
+ * ! was error -->--------------------------------------!---------->\
+ * ! ! ! ! !
+ * +<---- bad CSW ! ! !
+ * +<---- bad tag ! ! !
+ * ! ! V ! !
+ * ! ! +--------+ ! !
+ * ! ! ! CLRRS ! ! !
+ * ! ! +--------+ ! !
+ * ! ! ! ! !
+ * \------- ! --------------------[C]--------\ ! !
+ * ! ! ! !
+ * cmd->error---\ +--------+ ! !
+ * ! +--------------->! SENSE !<----------/ !
+ * STAT_FAIL----/ +--------+ !
+ * ! ! V
+ * ! 
V +--------+ + * \--------------------------------\--------------------->! DONE ! + * +--------+ */ -/* Transport (despite PR in the name) */ -#define US_PR_BULK 0x50 /* bulk only */ - -/* Protocol */ -#define US_SC_SCSI 0x06 /* Transparent */ +/* + * This many LUNs per USB device. + * Every one of them takes a host, see UB_MAX_HOSTS. + */ +#define UB_MAX_LUNS 9 /* */ -#define UB_MINORS_PER_MAJOR 8 + +#define UB_PARTS_PER_LUN 8 #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ @@ -65,7 +122,7 @@ struct bulk_cb_wrap { u32 Tag; /* unique per command id */ __le32 DataTransferLength; /* size of data */ u8 Flags; /* direction in bit 0 */ - u8 Lun; /* LUN normally 0 */ + u8 Lun; /* LUN */ u8 Length; /* of of the CDB */ u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ }; @@ -85,8 +142,6 @@ struct bulk_cs_wrap { #define US_BULK_CS_WRAP_LEN 13 #define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ -/* This is for Olympus Camedia digital cameras */ -#define US_BULK_CS_OLYMPUS_SIGN 0x55425355 /* spells out 'USBU' */ #define US_BULK_STAT_OK 0 #define US_BULK_STAT_FAIL 1 #define US_BULK_STAT_PHASE 2 @@ -99,7 +154,7 @@ struct bulk_cs_wrap { */ struct ub_dev; -#define UB_MAX_REQ_SG 1 +#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */ #define UB_MAX_SECTORS 64 /* @@ -129,27 +184,16 @@ enum ub_scsi_cmd_state { UB_CMDST_CLR2STS, /* Clearing before requesting status */ UB_CMDST_STAT, /* Status phase */ UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */ + UB_CMDST_CLRRS, /* Clearing before retrying status */ UB_CMDST_SENSE, /* Sending Request Sense */ UB_CMDST_DONE /* Final state */ }; -static char *ub_scsi_cmd_stname[] = { - ". ", - "Cmd", - "dat", - "c2s", - "sts", - "clr", - "Sen", - "fin" -}; - struct ub_scsi_cmd { unsigned char cdb[UB_MAX_CDB_SIZE]; unsigned char cdb_len; unsigned char dir; /* 0 - none, 1 - read, 3 - write. */ - unsigned char trace_index; enum ub_scsi_cmd_state state; unsigned int tag; struct ub_scsi_cmd *next; @@ -160,18 +204,23 @@ struct ub_scsi_cmd { int stat_count; /* Retries getting status. */ - /* - * We do not support transfers from highmem pages - * because the underlying USB framework does not do what we need. - */ - char *data; /* Requested buffer */ unsigned int len; /* Requested length */ - // struct scatterlist sgv[UB_MAX_REQ_SG]; + unsigned int current_sg; + unsigned int nsg; /* sgv[nsg] */ + struct scatterlist sgv[UB_MAX_REQ_SG]; + struct ub_lun *lun; void (*done)(struct ub_dev *, struct ub_scsi_cmd *); void *back; }; +struct ub_request { + struct request *rq; + unsigned int current_try; + unsigned int nsg; /* sgv[nsg] */ + struct scatterlist sgv[UB_MAX_REQ_SG]; +}; + /* */ struct ub_capacity { @@ -180,28 +229,6 @@ struct ub_capacity { unsigned int bshift; /* Shift between 512 and hard sects */ }; -/* - * The SCSI command tracing structure. - */ - -#define SCMD_ST_HIST_SZ 8 -#define SCMD_TRACE_SZ 63 /* Less than 4KB of 61-byte lines */ - -struct ub_scsi_cmd_trace { - int hcur; - unsigned int tag; - unsigned int req_size, act_size; - unsigned char op; - unsigned char dir; - unsigned char key, asc, ascq; - char st_hst[SCMD_ST_HIST_SZ]; -}; - -struct ub_scsi_trace { - int cur; - struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ]; -}; - /* * This is a direct take-off from linux/include/completion.h * The difference is that I do not wait on this thing, just poll. @@ -252,25 +279,49 @@ struct ub_scsi_cmd_queue { }; /* - * The UB device instance. + * The block device instance (one per LUN). 
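
+ *
+ * Roughly, the ownership chain sketched here (all names are the
+ * driver's own, from the structures that follow) is:
+ *
+ *	ub_dev (one per USB interface)
+ *	    `-- luns (list of ub_lun, one per LUN)
+ *	          `-- gendisk --> request_queue (spinlock shared via ub_dev)
+ *
+ * Each ub_lun also carries its own single ub_scsi_cmd (cmdv[1] below),
+ * which is what makes the fairness scheme described below cheap.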
+ */ +struct ub_lun { + struct ub_dev *udev; + struct list_head link; + struct gendisk *disk; + int id; /* Host index */ + int num; /* LUN number */ + char name[16]; + + int changed; /* Media was changed */ + int removable; + int readonly; + + struct ub_request urq; + + /* Use Ingo's mempool if or when we have more than one command. */ + /* + * Currently we never need more than one command for the whole device. + * However, giving every LUN a command is a cheap and automatic way + * to enforce fairness between them. + */ + int cmda[1]; + struct ub_scsi_cmd cmdv[1]; + + struct ub_capacity capacity; +}; + +/* + * The USB device instance. */ struct ub_dev { - spinlock_t lock; - int id; /* Number among ub's */ + spinlock_t *lock; atomic_t poison; /* The USB device is disconnected */ int openc; /* protected by ub_lock! */ /* kref is too implicit for our taste */ + int reset; /* Reset is running */ unsigned int tagcnt; - int changed; /* Media was changed */ - int removable; - int readonly; - int first_open; /* Kludge. See ub_bd_open. */ - char name[8]; + char name[12]; struct usb_device *dev; struct usb_interface *intf; - struct ub_capacity capacity; - struct gendisk *disk; + struct list_head luns; unsigned int send_bulk_pipe; /* cached pipe values */ unsigned int recv_bulk_pipe; @@ -279,10 +330,6 @@ struct ub_dev { struct tasklet_struct tasklet; - /* XXX Use Ingo's mempool (once we have more than one) */ - int cmda[1]; - struct ub_scsi_cmd cmdv[1]; - struct ub_scsi_cmd_queue cmd_queue; struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ unsigned char top_sense[UB_SENSE_SIZE]; @@ -291,46 +338,66 @@ struct ub_dev { struct urb work_urb; struct timer_list work_timer; int last_pipe; /* What might need clearing */ + __le32 signature; /* Learned signature */ struct bulk_cb_wrap work_bcb; struct bulk_cs_wrap work_bcs; struct usb_ctrlrequest work_cr; - struct ub_scsi_trace tr; + struct work_struct reset_work; + wait_queue_head_t reset_wait; + + int sg_stat[6]; }; /* */ -static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq); -static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq); -static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq); +static void ub_cleanup(struct ub_dev *sc); +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); +static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq); +static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq); static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_end_rq(struct request *rq, int uptodate); +static void ub_end_rq(struct request *rq, unsigned int status); +static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, + struct ub_request *urq, struct ub_scsi_cmd *cmd); static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); +static void ub_urb_complete(struct urb *urb); static void ub_scsi_action(unsigned long _dev); static void ub_scsi_dispatch(struct ub_dev *sc); static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); -static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static int __ub_state_stat(struct ub_dev 
*sc, struct ub_scsi_cmd *cmd); static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int stalled_pipe); static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); -static int ub_sync_tur(struct ub_dev *sc); -static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret); +static void ub_reset_enter(struct ub_dev *sc, int try); +static void ub_reset_task(struct work_struct *work); +static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); +static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, + struct ub_capacity *ret); +static int ub_sync_reset(struct ub_dev *sc); +static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe); +static int ub_probe_lun(struct ub_dev *sc, int lnum); /* */ +#ifdef CONFIG_USB_LIBUSUAL + +#define ub_usb_ids storage_usb_ids +#else + static struct usb_device_id ub_usb_ids[] = { - // { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) }, /* SDDR-31 */ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, { } }; MODULE_DEVICE_TABLE(usb, ub_usb_ids); +#endif /* CONFIG_USB_LIBUSUAL */ /* * Find me a way to identify "next free minor" for add_disk(), @@ -341,118 +408,12 @@ MODULE_DEVICE_TABLE(usb, ub_usb_ids); */ #define UB_MAX_HOSTS 26 static char ub_hostv[UB_MAX_HOSTS]; -static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ - -/* - * The SCSI command tracing procedures. - */ - -static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd) -{ - int n; - struct ub_scsi_cmd_trace *t; - - if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0; - t = &sc->tr.vec[n]; - - memset(t, 0, sizeof(struct ub_scsi_cmd_trace)); - t->tag = cmd->tag; - t->op = cmd->cdb[0]; - t->dir = cmd->dir; - t->req_size = cmd->len; - t->st_hst[0] = cmd->state; - - sc->tr.cur = n; - cmd->trace_index = n; -} - -static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd) -{ - int n; - struct ub_scsi_cmd_trace *t; - - t = &sc->tr.vec[cmd->trace_index]; - if (t->tag == cmd->tag) { - if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0; - t->st_hst[n] = cmd->state; - t->hcur = n; - } -} - -static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd) -{ - struct ub_scsi_cmd_trace *t; - t = &sc->tr.vec[cmd->trace_index]; - if (t->tag == cmd->tag) - t->act_size = cmd->act_len; -} +#define UB_QLOCK_NUM 5 +static spinlock_t ub_qlockv[UB_QLOCK_NUM]; +static int ub_qlock_next = 0; -static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - unsigned char *sense) -{ - struct ub_scsi_cmd_trace *t; - - t = &sc->tr.vec[cmd->trace_index]; - if (t->tag == cmd->tag) { - t->key = sense[2] & 0x0F; - t->asc = sense[12]; - t->ascq = sense[13]; - } -} - -static ssize_t ub_diag_show(struct device *dev, char *page) -{ - struct usb_interface *intf; - struct ub_dev *sc; - int cnt; - unsigned long flags; - int nc, nh; - int i, j; - struct ub_scsi_cmd_trace *t; - - intf = to_usb_interface(dev); - sc = usb_get_intfdata(intf); - if (sc == NULL) - return 0; - - cnt = 0; - spin_lock_irqsave(&sc->lock, flags); - - cnt += sprintf(page + cnt, - "qlen %d qmax %d changed %d removable %d readonly %d\n", - sc->cmd_queue.qlen, sc->cmd_queue.qmax, - sc->changed, sc->removable, sc->readonly); - - if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0; - for (j = 0; j < SCMD_TRACE_SZ; j++) { - t = 
&sc->tr.vec[nc]; - - cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op); - if (t->op == REQUEST_SENSE) { - cnt += sprintf(page + cnt, " [sense %x %02x %02x]", - t->key, t->asc, t->ascq); - } else { - cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir)); - cnt += sprintf(page + cnt, " [%5d %5d]", - t->req_size, t->act_size); - } - if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0; - for (i = 0; i < SCMD_ST_HIST_SZ; i++) { - cnt += sprintf(page + cnt, " %s", - ub_scsi_cmd_stname[(int)t->st_hst[nh]]); - if (++nh == SCMD_ST_HIST_SZ) nh = 0; - } - cnt += sprintf(page + cnt, "\n"); - - if (++nc == SCMD_TRACE_SZ) nc = 0; - } - - spin_unlock_irqrestore(&sc->lock, flags); - return cnt; -} - -static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */ +static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ /* * The id allocator. @@ -478,66 +439,128 @@ static int ub_id_get(void) static void ub_id_put(int id) { + unsigned long flags; if (id < 0 || id >= UB_MAX_HOSTS) { printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id); return; } + + spin_lock_irqsave(&ub_lock, flags); if (ub_hostv[id] == 0) { + spin_unlock_irqrestore(&ub_lock, flags); printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id); return; } ub_hostv[id] = 0; + spin_unlock_irqrestore(&ub_lock, flags); +} + +/* + * This is necessitated by the fact that blk_cleanup_queue does not + * necesserily destroy the queue. Instead, it may merely decrease q->refcnt. + * Since our blk_init_queue() passes a spinlock common with ub_dev, + * we have life time issues when ub_cleanup frees ub_dev. + */ +static spinlock_t *ub_next_lock(void) +{ + unsigned long flags; + spinlock_t *ret; + + spin_lock_irqsave(&ub_lock, flags); + ret = &ub_qlockv[ub_qlock_next]; + ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM; + spin_unlock_irqrestore(&ub_lock, flags); + return ret; +} + +/* + * Downcount for deallocation. This rides on two assumptions: + * - once something is poisoned, its refcount cannot grow + * - opens cannot happen at this time (del_gendisk was done) + * If the above is true, we can drop the lock, which we need for + * blk_cleanup_queue(): the silly thing may attempt to sleep. + * [Actually, it never needs to sleep for us, but it calls might_sleep()] + */ +static void ub_put(struct ub_dev *sc) +{ + unsigned long flags; + + spin_lock_irqsave(&ub_lock, flags); + --sc->openc; + if (sc->openc == 0 && atomic_read(&sc->poison)) { + spin_unlock_irqrestore(&ub_lock, flags); + ub_cleanup(sc); + } else { + spin_unlock_irqrestore(&ub_lock, flags); + } } /* * Final cleanup and deallocation. - * This must be called with ub_lock taken. */ static void ub_cleanup(struct ub_dev *sc) { + struct list_head *p; + struct ub_lun *lun; + request_queue_t *q; - /* - * If we zero disk->private_data BEFORE put_disk, we have to check - * for NULL all over the place in open, release, check_media and - * revalidate, because the block level semaphore is well inside the - * put_disk. But we cannot zero after the call, because *disk is gone. - * The sd.c is blatantly racy in this area. - */ - /* disk->private_data = NULL; */ - put_disk(sc->disk); - sc->disk = NULL; + while (!list_empty(&sc->luns)) { + p = sc->luns.next; + lun = list_entry(p, struct ub_lun, link); + list_del(p); + + /* I don't think queue can be NULL. But... 
Stolen from sx8.c */ + if ((q = lun->disk->queue) != NULL) + blk_cleanup_queue(q); + /* + * If we zero disk->private_data BEFORE put_disk, we have + * to check for NULL all over the place in open, release, + * check_media and revalidate, because the block level + * semaphore is well inside the put_disk. + * But we cannot zero after the call, because *disk is gone. + * The sd.c is blatantly racy in this area. + */ + /* disk->private_data = NULL; */ + put_disk(lun->disk); + lun->disk = NULL; + + ub_id_put(lun->id); + kfree(lun); + } - ub_id_put(sc->id); + usb_set_intfdata(sc->intf, NULL); + usb_put_intf(sc->intf); + usb_put_dev(sc->dev); kfree(sc); } /* * The "command allocator". */ -static struct ub_scsi_cmd *ub_get_cmd(struct ub_dev *sc) +static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) { struct ub_scsi_cmd *ret; - if (sc->cmda[0]) + if (lun->cmda[0]) return NULL; - ret = &sc->cmdv[0]; - sc->cmda[0] = 1; + ret = &lun->cmdv[0]; + lun->cmda[0] = 1; return ret; } -static void ub_put_cmd(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) { - if (cmd != &sc->cmdv[0]) { + if (cmd != &lun->cmdv[0]) { printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", - sc->name, cmd); + lun->name, cmd); return; } - if (!sc->cmda[0]) { - printk(KERN_WARNING "%s: releasing a free cmd\n", sc->name); + if (!lun->cmda[0]) { + printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); return; } - sc->cmda[0] = 0; + lun->cmda[0] = 0; } /* @@ -596,119 +619,103 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) * The request function is our main entry point */ -static void ub_bd_rq_fn(request_queue_t *q) +static void ub_request_fn(request_queue_t *q) { - struct ub_dev *sc = q->queuedata; + struct ub_lun *lun = q->queuedata; struct request *rq; while ((rq = elv_next_request(q)) != NULL) { - if (ub_bd_rq_fn_1(sc, rq) != 0) { + if (ub_request_fn_1(lun, rq) != 0) { blk_stop_queue(q); break; } } } -static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq) +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) { + struct ub_dev *sc = lun->udev; struct ub_scsi_cmd *cmd; - int rc; + struct ub_request *urq; + int n_elem; + + if (atomic_read(&sc->poison)) { + blkdev_dequeue_request(rq); + ub_end_rq(rq, DID_NO_CONNECT << 16); + return 0; + } - if (atomic_read(&sc->poison) || sc->changed) { + if (lun->changed && !blk_pc_request(rq)) { blkdev_dequeue_request(rq); - ub_end_rq(rq, 0); + ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); return 0; } - if ((cmd = ub_get_cmd(sc)) == NULL) + if (lun->urq.rq != NULL) + return -1; + if ((cmd = ub_get_cmd(lun)) == NULL) return -1; memset(cmd, 0, sizeof(struct ub_scsi_cmd)); blkdev_dequeue_request(rq); - if (blk_pc_request(rq)) { - rc = ub_cmd_build_packet(sc, cmd, rq); - } else { - rc = ub_cmd_build_block(sc, cmd, rq); + urq = &lun->urq; + memset(urq, 0, sizeof(struct ub_request)); + urq->rq = rq; + + /* + * get scatterlist from block layer + */ + n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); + if (n_elem < 0) { + /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */ + printk(KERN_INFO "%s: failed request map (%d)\n", + lun->name, n_elem); + goto drop; } - if (rc != 0) { - ub_put_cmd(sc, cmd); - ub_end_rq(rq, 0); - blk_start_queue(sc->disk->queue); - return 0; + if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ + printk(KERN_WARNING "%s: request with %d segments\n", + lun->name, n_elem); + goto drop; } + urq->nsg = n_elem; + sc->sg_stat[n_elem < 5 ? 
n_elem : 5]++; + if (blk_pc_request(rq)) { + ub_cmd_build_packet(sc, lun, cmd, urq); + } else { + ub_cmd_build_block(sc, lun, cmd, urq); + } cmd->state = UB_CMDST_INIT; + cmd->lun = lun; cmd->done = ub_rw_cmd_done; - cmd->back = rq; + cmd->back = urq; cmd->tag = sc->tagcnt++; - if ((rc = ub_submit_scsi(sc, cmd)) != 0) { - ub_put_cmd(sc, cmd); - ub_end_rq(rq, 0); - blk_start_queue(sc->disk->queue); - return 0; - } + if (ub_submit_scsi(sc, cmd) != 0) + goto drop; + + return 0; +drop: + ub_put_cmd(lun, cmd); + ub_end_rq(rq, DID_ERROR << 16); return 0; } -static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq) +static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq) { - int ub_dir; -#if 0 /* We use rq->buffer for now */ - struct scatterlist *sg; - int n_elem; -#endif + struct request *rq = urq->rq; unsigned int block, nblks; if (rq_data_dir(rq) == WRITE) - ub_dir = UB_DIR_WRITE; + cmd->dir = UB_DIR_WRITE; else - ub_dir = UB_DIR_READ; + cmd->dir = UB_DIR_READ; - /* - * get scatterlist from block layer - */ -#if 0 /* We use rq->buffer for now */ - sg = &cmd->sgv[0]; - n_elem = blk_rq_map_sg(q, rq, sg); - if (n_elem <= 0) { - ub_put_cmd(sc, cmd); - ub_end_rq(rq, 0); - blk_start_queue(q); - return 0; /* request with no s/g entries? */ - } - - if (n_elem != 1) { /* Paranoia */ - printk(KERN_WARNING "%s: request with %d segments\n", - sc->name, n_elem); - ub_put_cmd(sc, cmd); - ub_end_rq(rq, 0); - blk_start_queue(q); - return 0; - } -#endif - - /* - * XXX Unfortunately, this check does not work. It is quite possible - * to get bogus non-null rq->buffer if you allow sg by mistake. - */ - if (rq->buffer == NULL) { - /* - * This must not happen if we set the queue right. - * The block level must create bounce buffers for us. - */ - static int do_print = 1; - if (do_print) { - printk(KERN_WARNING "%s: unmapped block request" - " flags 0x%lx sectors %lu\n", - sc->name, rq->flags, rq->nr_sectors); - do_print = 0; - } - return -1; - } + cmd->nsg = urq->nsg; + memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); /* * build the command @@ -716,10 +723,10 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, * The call to blk_queue_hardsect_size() guarantees that request * is aligned, but it is given in terms of 512 byte units, always. */ - block = rq->sector >> sc->capacity.bshift; - nblks = rq->nr_sectors >> sc->capacity.bshift; + block = rq->sector >> lun->capacity.bshift; + nblks = rq->nr_sectors >> lun->capacity.bshift; - cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; + cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? 
READ_10: WRITE_10; /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ cmd->cdb[2] = block >> 24; cmd->cdb[3] = block >> 16; @@ -729,30 +736,13 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, cmd->cdb[8] = nblks; cmd->cdb_len = 10; - cmd->dir = ub_dir; - cmd->data = rq->buffer; cmd->len = rq->nr_sectors * 512; - - return 0; } -static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq) +static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq) { - - if (rq->data_len != 0 && rq->data == NULL) { - static int do_print = 1; - if (do_print) { - printk(KERN_WARNING "%s: unmapped packet request" - " flags 0x%lx length %d\n", - sc->name, rq->flags, rq->data_len); - do_print = 0; - } - return -1; - } - - memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); - cmd->cdb_len = rq->cmd_len; + struct request *rq = urq->rq; if (rq->data_len == 0) { cmd->dir = UB_DIR_NONE; @@ -762,42 +752,107 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, else cmd->dir = UB_DIR_READ; } - cmd->data = rq->data; - cmd->len = rq->data_len; - return 0; + cmd->nsg = urq->nsg; + memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); + + memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); + cmd->cdb_len = rq->cmd_len; + + cmd->len = rq->data_len; } static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { - struct request *rq = cmd->back; - struct gendisk *disk = sc->disk; - request_queue_t *q = disk->queue; - int uptodate; + struct ub_lun *lun = cmd->lun; + struct ub_request *urq = cmd->back; + struct request *rq; + unsigned int scsi_status; - if (blk_pc_request(rq)) { - /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ - memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); - rq->sense_len = UB_SENSE_SIZE; + rq = urq->rq; + + if (cmd->error == 0) { + if (blk_pc_request(rq)) { + if (cmd->act_len >= rq->data_len) + rq->data_len = 0; + else + rq->data_len -= cmd->act_len; + } + scsi_status = 0; + } else { + if (blk_pc_request(rq)) { + /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ + memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); + rq->sense_len = UB_SENSE_SIZE; + if (sc->top_sense[0] != 0) + scsi_status = SAM_STAT_CHECK_CONDITION; + else + scsi_status = DID_ERROR << 16; + } else { + if (cmd->error == -EIO) { + if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0) + return; + } + scsi_status = SAM_STAT_CHECK_CONDITION; + } } - if (cmd->error == 0) + urq->rq = NULL; + + ub_put_cmd(lun, cmd); + ub_end_rq(rq, scsi_status); + blk_start_queue(lun->disk->queue); +} + +static void ub_end_rq(struct request *rq, unsigned int scsi_status) +{ + int uptodate; + + if (scsi_status == 0) { uptodate = 1; - else + } else { uptodate = 0; - - ub_put_cmd(sc, cmd); - ub_end_rq(rq, uptodate); - blk_start_queue(q); + rq->errors = scsi_status; + } + end_that_request_first(rq, uptodate, rq->hard_nr_sectors); + end_that_request_last(rq, uptodate); } -static void ub_end_rq(struct request *rq, int uptodate) +static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, + struct ub_request *urq, struct ub_scsi_cmd *cmd) { - int rc; - rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors); - // assert(rc == 0); - end_that_request_last(rq); + if (atomic_read(&sc->poison)) + return -ENXIO; + + ub_reset_enter(sc, urq->current_try); + + if (urq->current_try >= 3) + return -EIO; + urq->current_try++; + + /* Remove this if anyone complains of flooding. 
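(A printk_ratelimit() guard would be the gentler alternative.) 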
*/ + printk(KERN_DEBUG "%s: dir %c len/act %d/%d " + "[sense %x %02x %02x] retry %d\n", + sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len, + cmd->key, cmd->asc, cmd->ascq, urq->current_try); + + memset(cmd, 0, sizeof(struct ub_scsi_cmd)); + ub_cmd_build_block(sc, lun, cmd, urq); + + cmd->state = UB_CMDST_INIT; + cmd->lun = lun; + cmd->done = ub_rw_cmd_done; + cmd->back = urq; + + cmd->tag = sc->tagcnt++; + +#if 0 /* Wasteful */ + return ub_submit_scsi(sc, cmd); +#else + ub_cmdq_add(sc, cmd); + return 0; +#endif } /* @@ -808,8 +863,6 @@ static void ub_end_rq(struct request *rq, int uptodate) * No exceptions. * * Host is assumed locked. - * - * XXX We only support Bulk for the moment. */ static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { @@ -855,7 +908,7 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) bcb->Tag = cmd->tag; /* Endianness is not important */ bcb->DataTransferLength = cpu_to_le32(cmd->len); bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0; - bcb->Lun = 0; /* No multi-LUN yet */ + bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0; bcb->Length = cmd->cdb_len; /* copy the command payload */ @@ -866,7 +919,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->send_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; /* Fill what we shouldn't be filling, because usb-storage did so. */ sc->work_urb.actual_length = 0; @@ -875,7 +927,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { /* XXX Clear stalls */ - printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */ ub_complete(&sc->work_done); return rc; } @@ -884,7 +935,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) add_timer(&sc->work_timer); cmd->state = UB_CMDST_CMD; - ub_cmdtr_state(sc, cmd); return 0; } @@ -896,9 +946,10 @@ static void ub_urb_timeout(unsigned long arg) struct ub_dev *sc = (struct ub_dev *) arg; unsigned long flags; - spin_lock_irqsave(&sc->lock, flags); - usb_unlink_urb(&sc->work_urb); - spin_unlock_irqrestore(&sc->lock, flags); + spin_lock_irqsave(sc->lock, flags); + if (!ub_is_completed(&sc->work_done)) + usb_unlink_urb(&sc->work_urb); + spin_unlock_irqrestore(sc->lock, flags); } /* @@ -908,7 +959,7 @@ static void ub_urb_timeout(unsigned long arg) * the sc->lock taken) and from an interrupt (while we do NOT have * the sc->lock taken). Therefore, bounce this off to a tasklet. 
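 *
 * Schematically, a sketch of the two call paths named above:
 *
 *	submit path (sc->lock held)        HC interrupt (lock not held)
 *	    ub_urb_complete()                  ub_urb_complete()
 *	        tasklet_schedule()                 tasklet_schedule()
 *
 * The tasklet, ub_scsi_action(), then takes sc->lock itself; doing
 * that directly in the completion would self-deadlock on the
 * left-hand path.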
*/ -static void ub_urb_complete(struct urb *urb, struct pt_regs *pt) +static void ub_urb_complete(struct urb *urb) { struct ub_dev *sc = urb->context; @@ -921,10 +972,9 @@ static void ub_scsi_action(unsigned long _dev) struct ub_dev *sc = (struct ub_dev *) _dev; unsigned long flags; - spin_lock_irqsave(&sc->lock, flags); - del_timer(&sc->work_timer); + spin_lock_irqsave(sc->lock, flags); ub_scsi_dispatch(sc); - spin_unlock_irqrestore(&sc->lock, flags); + spin_unlock_irqrestore(sc->lock, flags); } static void ub_scsi_dispatch(struct ub_dev *sc) @@ -932,20 +982,19 @@ static void ub_scsi_dispatch(struct ub_dev *sc) struct ub_scsi_cmd *cmd; int rc; - while ((cmd = ub_cmdq_peek(sc)) != NULL) { + while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) { if (cmd->state == UB_CMDST_DONE) { ub_cmdq_pop(sc); (*cmd->done)(sc, cmd); } else if (cmd->state == UB_CMDST_INIT) { - ub_cmdtr_new(sc, cmd); if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0) break; cmd->error = rc; cmd->state = UB_CMDST_DONE; - ub_cmdtr_state(sc, cmd); } else { if (!ub_is_completed(&sc->work_done)) break; + del_timer(&sc->work_timer); ub_scsi_urb_compl(sc, cmd); } } @@ -955,12 +1004,12 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { struct urb *urb = &sc->work_urb; struct bulk_cs_wrap *bcs; - int pipe; + int len; int rc; if (atomic_read(&sc->poison)) { - /* A little too simplistic, I feel... */ - goto Bad_End; + ub_state_done(sc, cmd, -ENODEV); + return; } if (cmd->state == UB_CMDST_CLEAR) { @@ -968,11 +1017,9 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) /* * STALL while clearning STALL. * The control pipe clears itself - nothing to do. - * XXX Might try to reset the device here and retry. */ - printk(KERN_NOTICE "%s: " - "stall on control pipe for device %u\n", - sc->name, sc->dev->devnum); + printk(KERN_NOTICE "%s: stall on control pipe\n", + sc->name); goto Bad_End; } @@ -988,14 +1035,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) } else if (cmd->state == UB_CMDST_CLR2STS) { if (urb->status == -EPIPE) { - /* - * STALL while clearning STALL. - * The control pipe clears itself - nothing to do. - * XXX Might try to reset the device here and retry. - */ - printk(KERN_NOTICE "%s: " - "stall on control pipe for device %u\n", - sc->name, sc->dev->devnum); + printk(KERN_NOTICE "%s: stall on control pipe\n", + sc->name); goto Bad_End; } @@ -1009,83 +1050,74 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) ub_state_stat(sc, cmd); - } else if (cmd->state == UB_CMDST_CMD) { + } else if (cmd->state == UB_CMDST_CLRRS) { if (urb->status == -EPIPE) { + printk(KERN_NOTICE "%s: stall on control pipe\n", + sc->name); + goto Bad_End; + } + + /* + * We ignore the result for the halt clear. + */ + + /* reset the endpoint toggle */ + usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), + usb_pipeout(sc->last_pipe), 0); + + ub_state_stat_counted(sc, cmd); + + } else if (cmd->state == UB_CMDST_CMD) { + switch (urb->status) { + case 0: + break; + case -EOVERFLOW: + goto Bad_End; + case -EPIPE: rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); if (rc != 0) { printk(KERN_NOTICE "%s: " - "unable to submit clear for device %u" - " (code %d)\n", - sc->name, sc->dev->devnum, rc); + "unable to submit clear (%d)\n", + sc->name, rc); /* * This is typically ENOMEM or some other such shit. * Retrying is pointless. Just do Bad End on it... 
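 * (Note that ub_state_done() both records the error and completes the
 * command, so these paths return immediately instead of falling
 * through to the normal state transition.)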
*/ - goto Bad_End; + ub_state_done(sc, cmd, rc); + return; } cmd->state = UB_CMDST_CLEAR; - ub_cmdtr_state(sc, cmd); return; - } - if (urb->status != 0) { - printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */ + case -ESHUTDOWN: /* unplug */ + case -EILSEQ: /* unplug timeout on uhci */ + ub_state_done(sc, cmd, -ENODEV); + return; + default: goto Bad_End; } if (urb->actual_length != US_BULK_CB_WRAP_LEN) { - printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */ - /* XXX Must do reset here to unconfuse the device */ goto Bad_End; } - if (cmd->dir == UB_DIR_NONE) { + if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) { ub_state_stat(sc, cmd); return; } - UB_INIT_COMPLETION(sc->work_done); - - if (cmd->dir == UB_DIR_READ) - pipe = sc->recv_bulk_pipe; - else - pipe = sc->send_bulk_pipe; - sc->last_pipe = pipe; - usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, - cmd->data, cmd->len, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; - sc->work_urb.actual_length = 0; - sc->work_urb.error_count = 0; - sc->work_urb.status = 0; - - if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { - /* XXX Clear stalls */ - printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */ - ub_complete(&sc->work_done); - ub_state_done(sc, cmd, rc); - return; - } - - sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; - add_timer(&sc->work_timer); - - cmd->state = UB_CMDST_DATA; - ub_cmdtr_state(sc, cmd); + // udelay(125); // usb-storage has this + ub_data_start(sc, cmd); } else if (cmd->state == UB_CMDST_DATA) { if (urb->status == -EPIPE) { rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); if (rc != 0) { printk(KERN_NOTICE "%s: " - "unable to submit clear for device %u" - " (code %d)\n", - sc->name, sc->dev->devnum, rc); - /* - * This is typically ENOMEM or some other such shit. - * Retrying is pointless. Just do Bad End on it... - */ - goto Bad_End; + "unable to submit clear (%d)\n", + sc->name, rc); + ub_state_done(sc, cmd, rc); + return; } cmd->state = UB_CMDST_CLR2STS; - ub_cmdtr_state(sc, cmd); return; } if (urb->status == -EOVERFLOW) { @@ -1093,14 +1125,54 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) * A babble? Failure, but we must transfer CSW now. */ cmd->error = -EOVERFLOW; /* A cheap trick... */ + ub_state_stat(sc, cmd); + return; + } + + if (cmd->dir == UB_DIR_WRITE) { + /* + * Do not continue writes in case of a failure. + * Doing so would cause sectors to be mixed up, + * which is worse than sectors lost. + * + * We must try to read the CSW, or many devices + * get confused. + */ + len = urb->actual_length; + if (urb->status != 0 || + len != cmd->sgv[cmd->current_sg].length) { + cmd->act_len += len; + + cmd->error = -EIO; + ub_state_stat(sc, cmd); + return; + } + } else { + /* + * If an error occurs on read, we record it, and + * continue to fetch data in order to avoid bubble. + * + * As a small shortcut, we stop if we detect that + * a CSW mixed into data. 
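+ * (A short packet whose length modulo 512 equals the 13-byte CSW
+ * size is what the check below treats as a CSW that arrived early.)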
+ */ if (urb->status != 0) - goto Bad_End; + cmd->error = -EIO; + + len = urb->actual_length; + if (urb->status != 0 || + len != cmd->sgv[cmd->current_sg].length) { + if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN) + goto Bad_End; + } } - cmd->act_len = urb->actual_length; - ub_cmdtr_act_len(sc, cmd); + cmd->act_len += urb->actual_length; + if (++cmd->current_sg < cmd->nsg) { + ub_data_start(sc, cmd); + return; + } ub_state_stat(sc, cmd); } else if (cmd->state == UB_CMDST_STAT) { @@ -1108,62 +1180,55 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); if (rc != 0) { printk(KERN_NOTICE "%s: " - "unable to submit clear for device %u" - " (code %d)\n", - sc->name, sc->dev->devnum, rc); - /* - * This is typically ENOMEM or some other such shit. - * Retrying is pointless. Just do Bad End on it... - */ - goto Bad_End; + "unable to submit clear (%d)\n", + sc->name, rc); + ub_state_done(sc, cmd, rc); + return; } - cmd->state = UB_CMDST_CLEAR; - ub_cmdtr_state(sc, cmd); + + /* + * Having a stall when getting CSW is an error, so + * make sure uppper levels are not oblivious to it. + */ + cmd->error = -EIO; /* A cheap trick... */ + + cmd->state = UB_CMDST_CLRRS; return; } + + /* Catch everything, including -EOVERFLOW and other nasties. */ if (urb->status != 0) goto Bad_End; if (urb->actual_length == 0) { - /* - * Some broken devices add unnecessary zero-length - * packets to the end of their data transfers. - * Such packets show up as 0-length CSWs. If we - * encounter such a thing, try to read the CSW again. - */ - if (++cmd->stat_count >= 4) { - printk(KERN_NOTICE "%s: " - "unable to get CSW on device %u\n", - sc->name, sc->dev->devnum); - goto Bad_End; - } - __ub_state_stat(sc, cmd); + ub_state_stat_counted(sc, cmd); return; } /* * Check the returned Bulk protocol status. + * The status block has to be validated first. */ bcs = &sc->work_bcs; - rc = le32_to_cpu(bcs->Residue); - if (rc != cmd->len - cmd->act_len) { + + if (sc->signature == cpu_to_le32(0)) { /* - * It is all right to transfer less, the caller has - * to check. But it's not all right if the device - * counts disagree with our counts. + * This is the first reply, so do not perform the check. + * Instead, remember the signature the device uses + * for future checks. But do not allow a nul. */ - /* P3 */ printk("%s: resid %d len %d act %d\n", - sc->name, rc, cmd->len, cmd->act_len); - goto Bad_End; - } - -#if 0 - if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) && - bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) { - /* Windows ignores signatures, so do we. */ + sc->signature = bcs->Signature; + if (sc->signature == cpu_to_le32(0)) { + ub_state_stat_counted(sc, cmd); + return; + } + } else { + if (bcs->Signature != sc->signature) { + ub_state_stat_counted(sc, cmd); + return; + } } -#endif if (bcs->Tag != cmd->tag) { /* @@ -1173,18 +1238,20 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) * commands and reply at commands we timed out before. * Without flushing these replies we loop forever. */ - if (++cmd->stat_count >= 4) { - printk(KERN_NOTICE "%s: " - "tag mismatch orig 0x%x reply 0x%x " - "on device %u\n", - sc->name, cmd->tag, bcs->Tag, - sc->dev->devnum); - goto Bad_End; - } - __ub_state_stat(sc, cmd); + ub_state_stat_counted(sc, cmd); return; } + len = le32_to_cpu(bcs->Residue); + if (len != cmd->len - cmd->act_len) { + /* + * It is all right to transfer less, the caller has + * to check. 
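(Residue is the device's little-endian count of bytes it says it did not transfer.) 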
But it's not all right if the device + * counts disagree with our counts. + */ + goto Bad_End; + } + switch (bcs->Status) { case US_BULK_STAT_OK: break; @@ -1192,34 +1259,74 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) ub_state_sense(sc, cmd); return; case US_BULK_STAT_PHASE: - /* XXX We must reset the transport here */ - /* P3 */ printk("%s: status PHASE\n", sc->name); goto Bad_End; default: printk(KERN_INFO "%s: unknown CSW status 0x%x\n", sc->name, bcs->Status); - goto Bad_End; + ub_state_done(sc, cmd, -EINVAL); + return; } /* Not zeroing error to preserve a babble indicator */ + if (cmd->error != 0) { + ub_state_sense(sc, cmd); + return; + } cmd->state = UB_CMDST_DONE; - ub_cmdtr_state(sc, cmd); ub_cmdq_pop(sc); (*cmd->done)(sc, cmd); } else if (cmd->state == UB_CMDST_SENSE) { ub_state_done(sc, cmd, -EIO); - } else { - printk(KERN_WARNING "%s: " - "wrong command state %d on device %u\n", - sc->name, cmd->state, sc->dev->devnum); - goto Bad_End; + } else { + printk(KERN_WARNING "%s: " + "wrong command state %d\n", + sc->name, cmd->state); + ub_state_done(sc, cmd, -EINVAL); + return; + } + return; + +Bad_End: /* Little Excel is dead */ + ub_state_done(sc, cmd, -EIO); +} + +/* + * Factorization helper for the command state machine: + * Initiate a data segment transfer. + */ +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct scatterlist *sg = &cmd->sgv[cmd->current_sg]; + int pipe; + int rc; + + UB_INIT_COMPLETION(sc->work_done); + + if (cmd->dir == UB_DIR_READ) + pipe = sc->recv_bulk_pipe; + else + pipe = sc->send_bulk_pipe; + sc->last_pipe = pipe; + usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, + page_address(sg->page) + sg->offset, sg->length, + ub_urb_complete, sc); + sc->work_urb.actual_length = 0; + sc->work_urb.error_count = 0; + sc->work_urb.status = 0; + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { + /* XXX Clear stalls */ + ub_complete(&sc->work_done); + ub_state_done(sc, cmd, rc); + return; } - return; -Bad_End: /* Little Excel is dead */ - ub_state_done(sc, cmd, -EIO); + sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; + add_timer(&sc->work_timer); + + cmd->state = UB_CMDST_DATA; } /* @@ -1231,7 +1338,6 @@ static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) cmd->error = rc; cmd->state = UB_CMDST_DONE; - ub_cmdtr_state(sc, cmd); ub_cmdq_pop(sc); (*cmd->done)(sc, cmd); } @@ -1240,7 +1346,7 @@ static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) * Factorization helper for the command state machine: * Submit a CSW read. 
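 * __ub_state_stat() returns 0 if the CSW read was submitted; nonzero
 * means the command was already finished off with ub_state_done(),
 * and the caller must simply return.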
*/ -static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { int rc; @@ -1249,21 +1355,20 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->recv_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { /* XXX Clear stalls */ - printk("%s: CSW #%d submit failed (%d)\n", sc->name, cmd->tag, rc); /* P3 */ ub_complete(&sc->work_done); ub_state_done(sc, cmd, rc); - return; + return -1; } sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; add_timer(&sc->work_timer); + return 0; } /* @@ -1272,11 +1377,30 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) */ static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { - __ub_state_stat(sc, cmd); + + if (__ub_state_stat(sc, cmd) != 0) + return; cmd->stat_count = 0; cmd->state = UB_CMDST_STAT; - ub_cmdtr_state(sc, cmd); +} + +/* + * Factorization helper for the command state machine: + * Submit a CSW read and go to STAT state with counter (along [C] path). + */ +static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + + if (++cmd->stat_count >= 4) { + ub_state_sense(sc, cmd); + return; + } + + if (__ub_state_stat(sc, cmd) != 0) + return; + + cmd->state = UB_CMDST_STAT; } /* @@ -1286,6 +1410,7 @@ static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { struct ub_scsi_cmd *scmd; + struct scatterlist *sg; int rc; if (cmd->cdb[0] == REQUEST_SENSE) { @@ -1294,20 +1419,25 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) } scmd = &sc->top_rqs_cmd; + memset(scmd, 0, sizeof(struct ub_scsi_cmd)); scmd->cdb[0] = REQUEST_SENSE; scmd->cdb[4] = UB_SENSE_SIZE; scmd->cdb_len = 6; scmd->dir = UB_DIR_READ; scmd->state = UB_CMDST_INIT; - scmd->data = sc->top_sense; + scmd->nsg = 1; + sg = &scmd->sgv[0]; + sg->page = virt_to_page(sc->top_sense); + sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); + sg->length = UB_SENSE_SIZE; scmd->len = UB_SENSE_SIZE; + scmd->lun = cmd->lun; scmd->done = ub_top_sense_done; scmd->back = cmd; scmd->tag = sc->tagcnt++; cmd->state = UB_CMDST_SENSE; - ub_cmdtr_state(sc, cmd); ub_cmdq_insert(sc, scmd); return; @@ -1342,7 +1472,6 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -1361,14 +1490,9 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, */ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) { - unsigned char *sense = scmd->data; + unsigned char *sense = sc->top_sense; struct ub_scsi_cmd *cmd; - /* - * Ignoring scmd->act_len, because the buffer was pre-zeroed. - */ - ub_cmdtr_sense(sc, scmd, sense); - /* * Find the command which triggered the unit attention or a check, * save the sense into it, and advance its state machine. 
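 *
 * For reference, the fixed-format sense bytes picked out below follow
 * the standard SCSI layout:
 *
 *	sense[2] & 0x0F		sense key
 *	sense[12]		additional sense code (ASC)
 *	sense[13]		additional sense code qualifier (ASCQ)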
@@ -1379,17 +1503,20 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) } if (cmd != scmd->back) { printk(KERN_WARNING "%s: " - "sense done for wrong command 0x%x on device %u\n", - sc->name, cmd->tag, sc->dev->devnum); + "sense done for wrong command 0x%x\n", + sc->name, cmd->tag); return; } if (cmd->state != UB_CMDST_SENSE) { printk(KERN_WARNING "%s: " - "sense done with bad cmd state %d on device %u\n", - sc->name, cmd->state, sc->dev->devnum); + "sense done with bad cmd state %d\n", + sc->name, cmd->state); return; } + /* + * Ignoring scmd->act_len, because the buffer was pre-zeroed. + */ cmd->key = sense[2] & 0x0F; cmd->asc = sense[12]; cmd->ascq = sense[13]; @@ -1397,68 +1524,124 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) ub_scsi_urb_compl(sc, cmd); } -#if 0 -/* Determine what the maximum LUN supported is */ -int usb_stor_Bulk_max_lun(struct us_data *us) +/* + * Reset management + * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing. + * XXX Make usb_sync_reset asynchronous. + */ + +static void ub_reset_enter(struct ub_dev *sc, int try) { - int result; - - /* issue the command */ - result = usb_stor_control_msg(us, us->recv_ctrl_pipe, - US_BULK_GET_MAX_LUN, - USB_DIR_IN | USB_TYPE_CLASS | - USB_RECIP_INTERFACE, - 0, us->ifnum, us->iobuf, 1, HZ); - - /* - * Some devices (i.e. Iomega Zip100) need this -- apparently - * the bulk pipes get STALLed when the GetMaxLUN request is - * processed. This is, in theory, harmless to all other devices - * (regardless of if they stall or not). - */ - if (result < 0) { - usb_stor_clear_halt(us, us->recv_bulk_pipe); - usb_stor_clear_halt(us, us->send_bulk_pipe); + + if (sc->reset) { + /* This happens often on multi-LUN devices. */ + return; } + sc->reset = try + 1; - US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", - result, us->iobuf[0]); +#if 0 /* Not needed because the disconnect waits for us. */ + unsigned long flags; + spin_lock_irqsave(&ub_lock, flags); + sc->openc++; + spin_unlock_irqrestore(&ub_lock, flags); +#endif - /* if we have a successful request, return the result */ - if (result == 1) - return us->iobuf[0]; +#if 0 /* We let them stop themselves. */ + struct list_head *p; + struct ub_lun *lun; + list_for_each(p, &sc->luns) { + lun = list_entry(p, struct ub_lun, link); + blk_stop_queue(lun->disk->queue); + } +#endif - /* return the default -- no LUNs */ - return 0; + schedule_work(&sc->reset_work); +} + +static void ub_reset_task(struct work_struct *work) +{ + struct ub_dev *sc = container_of(work, struct ub_dev, reset_work); + unsigned long flags; + struct list_head *p; + struct ub_lun *lun; + int lkr, rc; + + if (!sc->reset) { + printk(KERN_WARNING "%s: Running reset unrequested\n", + sc->name); + return; + } + + if (atomic_read(&sc->poison)) { + ; + } else if ((sc->reset & 1) == 0) { + ub_sync_reset(sc); + msleep(700); /* usb-storage sleeps 6s (!) 
*/ + ub_probe_clear_stall(sc, sc->recv_bulk_pipe); + ub_probe_clear_stall(sc, sc->send_bulk_pipe); + } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) { + ; + } else { + if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) { + printk(KERN_NOTICE + "%s: usb_lock_device_for_reset failed (%d)\n", + sc->name, lkr); + } else { + rc = usb_reset_device(sc->dev); + if (rc < 0) { + printk(KERN_NOTICE "%s: " + "usb_lock_device_for_reset failed (%d)\n", + sc->name, rc); + } + + if (lkr) + usb_unlock_device(sc->dev); + } + } + + /* + * In theory, no commands can be running while reset is active, + * so nobody can ask for another reset, and so we do not need any + * queues of resets or anything. We do need a spinlock though, + * to interact with block layer. + */ + spin_lock_irqsave(sc->lock, flags); + sc->reset = 0; + tasklet_schedule(&sc->tasklet); + list_for_each(p, &sc->luns) { + lun = list_entry(p, struct ub_lun, link); + blk_start_queue(lun->disk->queue); + } + wake_up(&sc->reset_wait); + spin_unlock_irqrestore(sc->lock, flags); } -#endif /* * This is called from a process context. */ -static void ub_revalidate(struct ub_dev *sc) +static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) { - sc->readonly = 0; /* XXX Query this from the device */ + lun->readonly = 0; /* XXX Query this from the device */ - sc->capacity.nsec = 0; - sc->capacity.bsize = 512; - sc->capacity.bshift = 0; + lun->capacity.nsec = 0; + lun->capacity.bsize = 512; + lun->capacity.bshift = 0; - if (ub_sync_tur(sc) != 0) + if (ub_sync_tur(sc, lun) != 0) return; /* Not ready */ - sc->changed = 0; + lun->changed = 0; - if (ub_sync_read_cap(sc, &sc->capacity) != 0) { + if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { /* * The retry here means something is wrong, either with the * device, with the transport, or with our code. * We keep this because sd.c has retries for capacity. */ - if (ub_sync_read_cap(sc, &sc->capacity) != 0) { - sc->capacity.nsec = 0; - sc->capacity.bsize = 512; - sc->capacity.bshift = 0; + if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { + lun->capacity.nsec = 0; + lun->capacity.bsize = 512; + lun->capacity.bshift = 0; } } } @@ -1471,12 +1654,11 @@ static void ub_revalidate(struct ub_dev *sc) static int ub_bd_open(struct inode *inode, struct file *filp) { struct gendisk *disk = inode->i_bdev->bd_disk; - struct ub_dev *sc; + struct ub_lun *lun = disk->private_data; + struct ub_dev *sc = lun->udev; unsigned long flags; int rc; - if ((sc = disk->private_data) == NULL) - return -ENXIO; spin_lock_irqsave(&ub_lock, flags); if (atomic_read(&sc->poison)) { spin_unlock_irqrestore(&ub_lock, flags); @@ -1485,27 +1667,7 @@ static int ub_bd_open(struct inode *inode, struct file *filp) sc->openc++; spin_unlock_irqrestore(&ub_lock, flags); - /* - * This is a workaround for a specific problem in our block layer. - * In 2.6.9, register_disk duplicates the code from rescan_partitions. - * However, if we do add_disk with a device which persistently reports - * a changed media, add_disk calls register_disk, which does do_open, - * which will call rescan_paritions for changed media. After that, - * register_disk attempts to do it all again and causes double kobject - * registration and a eventually an oops on module removal. - * - * The bottom line is, Al Viro says that we should not allow - * bdev->bd_invalidated to be set when doing add_disk no matter what. 
- */ - if (sc->first_open) { - if (sc->changed) { - sc->first_open = 0; - rc = -ENOMEDIUM; - goto err_open; - } - } - - if (sc->removable || sc->readonly) + if (lun->removable || lun->readonly) check_disk_change(inode->i_bdev); /* @@ -1513,12 +1675,12 @@ static int ub_bd_open(struct inode *inode, struct file *filp) * under some pretty murky conditions (a failure of READ CAPACITY). * We may need it one day. */ - if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) { + if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) { rc = -ENOMEDIUM; goto err_open; } - if (sc->readonly && (filp->f_mode & FMODE_WRITE)) { + if (lun->readonly && (filp->f_mode & FMODE_WRITE)) { rc = -EROFS; goto err_open; } @@ -1526,11 +1688,7 @@ static int ub_bd_open(struct inode *inode, struct file *filp) return 0; err_open: - spin_lock_irqsave(&ub_lock, flags); - --sc->openc; - if (sc->openc == 0 && atomic_read(&sc->poison)) - ub_cleanup(sc); - spin_unlock_irqrestore(&ub_lock, flags); + ub_put(sc); return rc; } @@ -1539,16 +1697,10 @@ err_open: static int ub_bd_release(struct inode *inode, struct file *filp) { struct gendisk *disk = inode->i_bdev->bd_disk; - struct ub_dev *sc = disk->private_data; - unsigned long flags; + struct ub_lun *lun = disk->private_data; + struct ub_dev *sc = lun->udev; - spin_lock_irqsave(&ub_lock, flags); - --sc->openc; - if (sc->openc == 0) - sc->first_open = 0; - if (sc->openc == 0 && atomic_read(&sc->poison)) - ub_cleanup(sc); - spin_unlock_irqrestore(&ub_lock, flags); + ub_put(sc); return 0; } @@ -1576,20 +1728,14 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp, */ static int ub_bd_revalidate(struct gendisk *disk) { - struct ub_dev *sc = disk->private_data; - - ub_revalidate(sc); - /* This is pretty much a long term P3 */ - if (!atomic_read(&sc->poison)) { /* Cover sc->dev */ - printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n", - sc->name, sc->dev->devnum, - sc->capacity.nsec, sc->capacity.bsize); - } + struct ub_lun *lun = disk->private_data; + + ub_revalidate(lun->udev, lun); /* XXX Support sector size switching like in sr.c */ - blk_queue_hardsect_size(disk->queue, sc->capacity.bsize); - set_capacity(disk, sc->capacity.nsec); - // set_disk_ro(sdkp->disk, sc->readonly); + blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); + set_capacity(disk, lun->capacity.nsec); + // set_disk_ro(sdkp->disk, lun->readonly); return 0; } @@ -1605,9 +1751,9 @@ static int ub_bd_revalidate(struct gendisk *disk) */ static int ub_bd_media_changed(struct gendisk *disk) { - struct ub_dev *sc = disk->private_data; + struct ub_lun *lun = disk->private_data; - if (!sc->removable) + if (!lun->removable) return 0; /* @@ -1619,12 +1765,12 @@ static int ub_bd_media_changed(struct gendisk *disk) * will fail, then block layer discards the data. Since we never * spin drives up, such devices simply cannot be used with ub anyway. */ - if (ub_sync_tur(sc) != 0) { - sc->changed = 1; + if (ub_sync_tur(lun->udev, lun) != 0) { + lun->changed = 1; return 1; } - return sc->changed; + return lun->changed; } static struct block_device_operations ub_bd_fops = { @@ -1648,7 +1794,7 @@ static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) /* * Test if the device has a check condition on it, synchronously. 
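 *
 * A typical synchronous use, as in ub_revalidate() (lun may be NULL
 * during early probing):
 *
 *	if (ub_sync_tur(sc, lun) != 0)
 *		return;		/* Not ready */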
*/ -static int ub_sync_tur(struct ub_dev *sc) +static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) { struct ub_scsi_cmd *cmd; enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; @@ -1659,27 +1805,25 @@ static int ub_sync_tur(struct ub_dev *sc) init_completion(&compl); rc = -ENOMEM; - if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) + if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) goto err_alloc; - memset(cmd, 0, ALLOC_SIZE); cmd->cdb[0] = TEST_UNIT_READY; cmd->cdb_len = 6; cmd->dir = UB_DIR_NONE; cmd->state = UB_CMDST_INIT; + cmd->lun = lun; /* This may be NULL, but that's ok */ cmd->done = ub_probe_done; cmd->back = &compl; - spin_lock_irqsave(&sc->lock, flags); + spin_lock_irqsave(sc->lock, flags); cmd->tag = sc->tagcnt++; rc = ub_submit_scsi(sc, cmd); - spin_unlock_irqrestore(&sc->lock, flags); + spin_unlock_irqrestore(sc->lock, flags); - if (rc != 0) { - printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */ + if (rc != 0) goto err_submit; - } wait_for_completion(&compl); @@ -1697,9 +1841,11 @@ err_alloc: /* * Read the SCSI capacity synchronously (for probing). */ -static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret) +static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, + struct ub_capacity *ret) { struct ub_scsi_cmd *cmd; + struct scatterlist *sg; char *p; enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; unsigned long flags; @@ -1711,40 +1857,40 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret) init_completion(&compl); rc = -ENOMEM; - if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) + if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) goto err_alloc; - memset(cmd, 0, ALLOC_SIZE); p = (char *)cmd + sizeof(struct ub_scsi_cmd); cmd->cdb[0] = 0x25; cmd->cdb_len = 10; cmd->dir = UB_DIR_READ; cmd->state = UB_CMDST_INIT; - cmd->data = p; + cmd->nsg = 1; + sg = &cmd->sgv[0]; + sg->page = virt_to_page(p); + sg->offset = (unsigned long)p & (PAGE_SIZE-1); + sg->length = 8; cmd->len = 8; + cmd->lun = lun; cmd->done = ub_probe_done; cmd->back = &compl; - spin_lock_irqsave(&sc->lock, flags); + spin_lock_irqsave(sc->lock, flags); cmd->tag = sc->tagcnt++; rc = ub_submit_scsi(sc, cmd); - spin_unlock_irqrestore(&sc->lock, flags); + spin_unlock_irqrestore(sc->lock, flags); - if (rc != 0) { - printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */ + if (rc != 0) goto err_submit; - } wait_for_completion(&compl); if (cmd->error != 0) { - printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */ rc = -EIO; goto err_read; } if (cmd->act_len != 8) { - printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */ rc = -EIO; goto err_read; } @@ -1758,7 +1904,6 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret) case 2048: shift = 2; break; case 4096: shift = 3; break; default: - printk("ub: Bad sector size %u\n", bsize); /* P3 */ rc = -EDOM; goto err_inv_bsize; } @@ -1778,7 +1923,7 @@ err_alloc: /* */ -static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt) +static void ub_probe_urb_complete(struct urb *urb) { struct completion *cop = urb->context; complete(cop); @@ -1790,6 +1935,126 @@ static void ub_probe_timeout(unsigned long arg) complete(cop); } +/* + * Reset with a Bulk reset. 
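+ *
+ * This is the Bulk-Only Mass Storage Reset; the full reset recovery
+ * of the Bulk-Only spec also clears the halt on both bulk pipes,
+ * which ub_reset_task() does right after calling this.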
+ */
+static int ub_sync_reset(struct ub_dev *sc)
+{
+	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
+	struct usb_ctrlrequest *cr;
+	struct completion compl;
+	struct timer_list timer;
+	int rc;
+
+	init_completion(&compl);
+
+	cr = &sc->work_cr;
+	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+	cr->bRequest = US_BULK_RESET_REQUEST;
+	cr->wValue = cpu_to_le16(0);
+	cr->wIndex = cpu_to_le16(ifnum);
+	cr->wLength = cpu_to_le16(0);
+
+	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
+	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
+		printk(KERN_WARNING
+		     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
+		return rc;
+	}
+
+	init_timer(&timer);
+	timer.function = ub_probe_timeout;
+	timer.data = (unsigned long) &compl;
+	timer.expires = jiffies + UB_CTRL_TIMEOUT;
+	add_timer(&timer);
+
+	wait_for_completion(&compl);
+
+	del_timer_sync(&timer);
+	usb_kill_urb(&sc->work_urb);
+
+	return sc->work_urb.status;
+}
+
+/*
+ * Get the number of LUNs by way of the Bulk GetMaxLUN command.
+ */
+static int ub_sync_getmaxlun(struct ub_dev *sc)
+{
+	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
+	unsigned char *p;
+	enum { ALLOC_SIZE = 1 };
+	struct usb_ctrlrequest *cr;
+	struct completion compl;
+	struct timer_list timer;
+	int nluns;
+	int rc;
+
+	init_completion(&compl);
+
+	rc = -ENOMEM;
+	if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
+		goto err_alloc;
+	*p = 55;
+
+	cr = &sc->work_cr;
+	cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+	cr->bRequest = US_BULK_GET_MAX_LUN;
+	cr->wValue = cpu_to_le16(0);
+	cr->wIndex = cpu_to_le16(ifnum);
+	cr->wLength = cpu_to_le16(1);
+
+	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
+	    (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
+		goto err_submit;
+
+	init_timer(&timer);
+	timer.function = ub_probe_timeout;
+	timer.data = (unsigned long) &compl;
+	timer.expires = jiffies + UB_CTRL_TIMEOUT;
+	add_timer(&timer);
+
+	wait_for_completion(&compl);
+
+	del_timer_sync(&timer);
+	usb_kill_urb(&sc->work_urb);
+
+	if ((rc = sc->work_urb.status) < 0)
+		goto err_io;
+
+	if (sc->work_urb.actual_length != 1) {
+		nluns = 0;
+	} else {
+		if ((nluns = *p) == 55) {
+			nluns = 0;
+		} else {
+			/* GetMaxLUN returns the maximum LUN number */
+			nluns += 1;
+			if (nluns > UB_MAX_LUNS)
+				nluns = UB_MAX_LUNS;
		}
+	}
+
+	kfree(p);
+	return nluns;
+
+err_io:
+err_submit:
+	kfree(p);
+err_alloc:
+	return rc;
+}
+
+/*
+ * Clear initial stalls.
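+ *
+ * A stall is cleared with a standard CLEAR_FEATURE(ENDPOINT_HALT)
+ * request aimed at the offending endpoint. The body of the function
+ * below (largely outside this hunk) fills work_cr essentially so:
+ *
+ *	endp = usb_pipeendpoint(stalled_pipe);
+ *	if (usb_pipein(stalled_pipe))
+ *		endp |= USB_DIR_IN;
+ *	cr->bRequestType = USB_RECIP_ENDPOINT;
+ *	cr->bRequest = USB_REQ_CLEAR_FEATURE;
+ *	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
+ *	cr->wIndex = cpu_to_le16(endp);
+ *	cr->wLength = cpu_to_le16(0);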
 */
@@ -1816,7 +2081,6 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
 
 	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
 	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
-	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
@@ -1876,9 +2140,9 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
 	}
 
 	if (ep_in == NULL || ep_out == NULL) {
-		printk(KERN_NOTICE "%s: device %u failed endpoint check\n",
-		    sc->name, sc->dev->devnum);
-		return -EIO;
+		printk(KERN_NOTICE "%s: failed endpoint check\n",
+		    sc->name);
+		return -ENODEV;
 	}
 
 	/* Calculate and store the pipe values */
@@ -1900,19 +2164,23 @@ static int ub_probe(struct usb_interface *intf,
 	const struct usb_device_id *dev_id)
 {
 	struct ub_dev *sc;
-	request_queue_t *q;
-	struct gendisk *disk;
+	int nluns;
 	int rc;
 	int i;
 
+	if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
+		return -ENXIO;
+
 	rc = -ENOMEM;
-	if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
+	if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
 		goto err_core;
-	memset(sc, 0, sizeof(struct ub_dev));
-	spin_lock_init(&sc->lock);
+	sc->lock = ub_next_lock();
+	INIT_LIST_HEAD(&sc->luns);
 	usb_init_urb(&sc->work_urb);
 	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
 	atomic_set(&sc->poison, 0);
+	INIT_WORK(&sc->reset_work, ub_reset_task);
+	init_waitqueue_head(&sc->reset_wait);
 
 	init_timer(&sc->work_timer);
 	sc->work_timer.data = (unsigned long) sc;
@@ -1921,25 +2189,25 @@ static int ub_probe(struct usb_interface *intf,
 	ub_init_completion(&sc->work_done);
 	sc->work_done.done = 1;		/* A little yuk, but oh well... */
 
-	rc = -ENOSR;
-	if ((sc->id = ub_id_get()) == -1)
-		goto err_id;
-	snprintf(sc->name, 8, DRV_NAME "%c", sc->id + 'a');
-
 	sc->dev = interface_to_usbdev(intf);
 	sc->intf = intf;
 	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-
 	usb_set_intfdata(intf, sc);
 	usb_get_dev(sc->dev);
-	// usb_get_intf(sc->intf);	/* Do we need this? */
+	/*
+	 * Since we give the interface struct to the block level through
+	 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
+	 * oopses on close after a disconnect (kernels 2.6.16 and up).
+	 */
+	usb_get_intf(sc->intf);
 
-	/* XXX Verify that we can handle the device (from descriptors) */
+	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
+	    sc->dev->bus->busnum, sc->dev->devnum);
 
-	ub_get_pipes(sc, sc->dev, intf);
+	/* XXX Verify that we can handle the device (from descriptors) */
 
-	if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0)
-		goto err_diag;
+	if (ub_get_pipes(sc, sc->dev, intf) != 0)
		goto err_dev_desc;
 
 	/*
 	 * At this point, all USB initialization is done, do upper layer.
@@ -1952,8 +2220,10 @@ static int ub_probe(struct usb_interface *intf,
 	 * This is needed to clear toggles. It is a problem only if we do
 	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
 	 */
+#if 0 /* iPod Mini fails if we do this (big white iPod works) */
 	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
 	ub_probe_clear_stall(sc, sc->send_bulk_pipe);
+#endif
 
 	/*
 	 * The way this is used by the startup code is a little specific.
@@ -1970,65 +2240,92 @@ static int ub_probe(struct usb_interface *intf,
 	 * has to succeed, so we clear checks with an additional one here.
	 * In any case it's not our business how revalidation is implemented.
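	 *
	 * In outline, the sequence below is: swallow the initial
	 * UNIT ATTENTION with up to three TEST UNIT READYs, then ask for
	 * the LUN count with up to three GetMaxLUNs (a failure or a zero
	 * answer falls back to a single LUN), then register one disk per
	 * LUN. Roughly:
	 *
	 *	ub_sync_tur(sc, NULL);		x3, until rc != 0x6
	 *	nluns = ub_sync_getmaxlun(sc);	x3, 0 means "assume one"
	 *	ub_probe_lun(sc, i);		for each of nluns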
	 */
-	for (i = 0; i < 3; i++) {	/* Retries for benh's key */
-		if ((rc = ub_sync_tur(sc)) <= 0) break;
+	for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
+		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
 		if (rc != 0x6) break;
 		msleep(10);
 	}
 
-	sc->removable = 1;	/* XXX Query this from the device */
-	sc->changed = 1;	/* ub_revalidate clears only */
-	sc->first_open = 1;
+	nluns = 1;
+	for (i = 0; i < 3; i++) {
+		if ((rc = ub_sync_getmaxlun(sc)) < 0)
+			break;
+		if (rc != 0) {
+			nluns = rc;
+			break;
+		}
+		msleep(100);
+	}
 
-	ub_revalidate(sc);
-	/* This is pretty much a long term P3 */
-	printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
-	    sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize);
+	for (i = 0; i < nluns; i++) {
+		ub_probe_lun(sc, i);
+	}
+	return 0;
+
+err_dev_desc:
+	usb_set_intfdata(intf, NULL);
+	usb_put_intf(sc->intf);
+	usb_put_dev(sc->dev);
+	kfree(sc);
+err_core:
+	return rc;
+}
+
+static int ub_probe_lun(struct ub_dev *sc, int lnum)
+{
+	struct ub_lun *lun;
+	request_queue_t *q;
+	struct gendisk *disk;
+	int rc;
+
+	rc = -ENOMEM;
+	if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
+		goto err_alloc;
+	lun->num = lnum;
+
+	rc = -ENOSR;
+	if ((lun->id = ub_id_get()) == -1)
+		goto err_id;
+
+	lun->udev = sc;
+
+	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
+	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
+
+	lun->removable = 1;	/* XXX Query this from the device */
+	lun->changed = 1;	/* ub_revalidate clears only */
+	ub_revalidate(sc, lun);
 
-	/*
-	 * Just one disk per sc currently, but maybe more.
-	 */
 	rc = -ENOMEM;
-	if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL)
+	if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
 		goto err_diskalloc;
 
-	sc->disk = disk;
-	sprintf(disk->disk_name, DRV_NAME "%c", sc->id + 'a');
-	sprintf(disk->devfs_name, DEVFS_NAME "/%c", sc->id + 'a');
+	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
 	disk->major = UB_MAJOR;
-	disk->first_minor = sc->id * UB_MINORS_PER_MAJOR;
+	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
 	disk->fops = &ub_bd_fops;
-	disk->private_data = sc;
-	disk->driverfs_dev = &intf->dev;
+	disk->private_data = lun;
+	disk->driverfs_dev = &sc->intf->dev;
 
 	rc = -ENOMEM;
-	if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL)
+	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
 		goto err_blkqinit;
 
 	disk->queue = q;
 
-	// blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
 	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
-	// blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
+	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
 	blk_queue_max_sectors(q, UB_MAX_SECTORS);
-	blk_queue_hardsect_size(q, sc->capacity.bsize);
-
-	/*
-	 * This is a serious infraction, caused by a deficiency in the
-	 * USB sg interface (usb_sg_wait()). We plan to remove this once
-	 * we get mileage on the driver and can justify a change to USB API.
-	 * See blk_queue_bounce_limit() to understand this part.
-	 *
-	 * XXX And I still need to be aware of the DMA mask in the HC.
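	 *
	 * With the bounce limit set to BLK_BOUNCE_HIGH above, the block
	 * layer hands this driver lowmem pages only, so the request
	 * function (not part of this hunk) can map a request straight
	 * into the preallocated scatterlist, roughly:
	 *
	 *	urq->nsg = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
	 *
	 * which the limits above cap at UB_MAX_REQ_SG segments.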
-	 */
-	q->bounce_pfn = blk_max_low_pfn;
-	q->bounce_gfp = GFP_NOIO;
+	blk_queue_hardsect_size(q, lun->capacity.bsize);
 
-	q->queuedata = sc;
+	lun->disk = disk;
+	q->queuedata = lun;
+	list_add(&lun->link, &sc->luns);
 
-	set_capacity(disk, sc->capacity.nsec);
-	if (sc->removable)
+	set_capacity(disk, lun->capacity.nsec);
+	if (lun->removable)
 		disk->flags |= GENHD_FL_REMOVABLE;
 
 	add_disk(disk);
@@ -2038,27 +2335,29 @@ static int ub_probe(struct usb_interface *intf,
 err_blkqinit:
 	put_disk(disk);
 err_diskalloc:
-	device_remove_file(&sc->intf->dev, &dev_attr_diag);
-err_diag:
-	usb_set_intfdata(intf, NULL);
-	// usb_put_intf(sc->intf);
-	usb_put_dev(sc->dev);
-	spin_lock_irq(&ub_lock);
-	ub_id_put(sc->id);
-	spin_unlock_irq(&ub_lock);
+	ub_id_put(lun->id);
 err_id:
-	kfree(sc);
-err_core:
+	kfree(lun);
+err_alloc:
 	return rc;
 }
 
 static void ub_disconnect(struct usb_interface *intf)
 {
 	struct ub_dev *sc = usb_get_intfdata(intf);
-	struct gendisk *disk = sc->disk;
-	request_queue_t *q = disk->queue;
+	struct list_head *p;
+	struct ub_lun *lun;
 	unsigned long flags;
 
+	/*
+	 * Prevent ub_bd_release from pulling the rug from under us.
+	 * XXX This is starting to look like a kref.
+	 * XXX Why not take this ref at probe time?
+	 */
+	spin_lock_irqsave(&ub_lock, flags);
+	sc->openc++;
+	spin_unlock_irqrestore(&ub_lock, flags);
+
 	/*
 	 * Fence stall clearings, operations triggered by unlinkings and so on.
 	 * We do not attempt to unlink any URBs, because we do not trust the
@@ -2066,6 +2365,11 @@ static void ub_disconnect(struct usb_interface *intf)
 	 */
 	atomic_set(&sc->poison, 1);
 
+	/*
+	 * Wait for reset to end, if any.
+	 */
+	wait_event(sc->reset_wait, !sc->reset);
+
 	/*
 	 * Blow away queued commands.
 	 *
@@ -2075,14 +2379,13 @@ static void ub_disconnect(struct usb_interface *intf)
 	 * and the whole queue drains. So, we just use this code to
 	 * print warnings.
 	 */
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 	{
 		struct ub_scsi_cmd *cmd;
 		int cnt = 0;
-		while ((cmd = ub_cmdq_pop(sc)) != NULL) {
+		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
 			cmd->error = -ENOTCONN;
 			cmd->state = UB_CMDST_DONE;
-			ub_cmdtr_state(sc, cmd);
 			ub_cmdq_pop(sc);
 			(*cmd->done)(sc, cmd);
 			cnt++;
@@ -2092,33 +2395,32 @@ static void ub_disconnect(struct usb_interface *intf)
 			    "%d was queued after shutdown\n", sc->name, cnt);
 		}
 	}
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 
 	/*
-	 * Unregister the upper layer, this waits for all commands to end.
+	 * Unregister the upper layer.
 	 */
-	if (disk->flags & GENHD_FL_UP)
-		del_gendisk(disk);
-	if (q)
-		blk_cleanup_queue(q);
+	list_for_each (p, &sc->luns) {
+		lun = list_entry(p, struct ub_lun, link);
+		del_gendisk(lun->disk);
+		/*
+		 * I wish I could do:
+		 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+		 * As it is, we rely on our internal poisoning and let
+		 * the upper levels spin furiously failing all the I/O.
+		 */
+	}
 
 	/*
-	 * We really expect blk_cleanup_queue() to wait, so no amount
-	 * of paranoya is too much.
-	 *
-	 * Taking a lock on a structure which is about to be freed
-	 * is very nonsensual. Here it is largely a way to do a debug freeze,
-	 * and a bracket which shows where the nonsensual code segment ends.
-	 *
 	 * Testing for -EINPROGRESS is always a bug, so we are bending
	 * the rules a little.
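	 *
	 * The poison flag set earlier is what actually stops traffic:
	 * every submission path checks it first. Assuming the shape of
	 * the request function (it is not part of this hunk, and the
	 * helper name is taken from the driver source), requests are
	 * failed up front roughly like this, so the queues drain instead
	 * of wedging:
	 *
	 *	if (atomic_read(&sc->poison)) {
	 *		blkdev_dequeue_request(rq);
	 *		ub_end_rq(rq, 0);
	 *		return 0;
	 *	}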
	 */
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
 		printk(KERN_WARNING "%s: "
 		    "URB is active after disconnect\n", sc->name);
 	}
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 
 	/*
 	 * There is virtually no chance that other CPU runs times so long
@@ -2132,21 +2434,10 @@ static void ub_disconnect(struct usb_interface *intf)
 	 * and no URBs left in transit.
 	 */
 
-	device_remove_file(&sc->intf->dev, &dev_attr_diag);
-	usb_set_intfdata(intf, NULL);
-	// usb_put_intf(sc->intf);
-	sc->intf = NULL;
-	usb_put_dev(sc->dev);
-	sc->dev = NULL;
-
-	spin_lock_irqsave(&ub_lock, flags);
-	if (sc->openc == 0)
-		ub_cleanup(sc);
-	spin_unlock_irqrestore(&ub_lock, flags);
+	ub_put(sc);
 }
 
-struct usb_driver ub_driver = {
-	.owner =	THIS_MODULE,
+static struct usb_driver ub_driver = {
 	.name =		"ub",
 	.probe =	ub_probe,
 	.disconnect =	ub_disconnect,
@@ -2156,21 +2447,21 @@ struct usb_driver ub_driver = {
 static int __init ub_init(void)
 {
 	int rc;
+	int i;
 
-	/* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu\n",
-		sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev));
+	for (i = 0; i < UB_QLOCK_NUM; i++)
+		spin_lock_init(&ub_qlockv[i]);
 
 	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
 		goto err_regblkdev;
-	devfs_mk_dir(DEVFS_NAME);
 
 	if ((rc = usb_register(&ub_driver)) != 0)
 		goto err_register;
+	usb_usual_set_present(USB_US_TYPE_UB);
 	return 0;
 
 err_register:
-	devfs_remove(DEVFS_NAME);
 	unregister_blkdev(UB_MAJOR, DRV_NAME);
 err_regblkdev:
 	return rc;
@@ -2180,8 +2471,8 @@ static void __exit ub_exit(void)
 {
 	usb_deregister(&ub_driver);
 
-	devfs_remove(DEVFS_NAME);
 	unregister_blkdev(UB_MAJOR, DRV_NAME);
+	usb_usual_clear_present(USB_US_TYPE_UB);
 }
 
 module_init(ub_init);
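 
 /*
  * Not shown in this diff, but useful when reading it: ub binds through
  * the conventional transparent-SCSI/Bulk-Only interface match, so the
  * .id_table that ub_driver points at (declared elsewhere in the file)
  * looks essentially like:
  *
  *	static struct usb_device_id ub_usb_ids[] = {
  *		{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE,
  *				US_SC_SCSI, US_PR_BULK) },
  *		{ }
  *	};
  *	MODULE_DEVICE_TABLE(usb, ub_usb_ids);
  */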