/*
* sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
*
- * Copyright 2004 Red Hat, Inc.
+ * Copyright 2004-2005 Red Hat, Inc.
*
* Author/maintainer: Jeff Garzik <jgarzik@pobox.com>
*
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
-#include <linux/devfs_fs_kernel.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
#include <asm/io.h>
-#include <asm/semaphore.h>
#include <asm/uaccess.h>
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Promise SATA SX8 block driver");
-
#if 0
#define CARM_DEBUG
#define CARM_VERBOSE_DEBUG
#undef CARM_NDEBUG
#define DRV_NAME "sx8"
-#define DRV_VERSION "0.8"
+#define DRV_VERSION "1.0"
#define PFX DRV_NAME ": "
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Promise SATA SX8 block driver");
+MODULE_VERSION(DRV_VERSION);
+
+/*
+ * SX8 hardware has a single message queue for all ATA ports.
+ * When this driver was written, the hardware (firmware?) would
+ * eventually corrupt data if more than one request was outstanding.
+ * As one can imagine, having 8 ports bottlenecking on a single
+ * command hurts performance.
+ *
+ * Based on user reports, later versions of the hardware (firmware?)
+ * seem to be able to survive with more than one command queued.
+ *
+ * Therefore, we default to the safe option -- 1 command -- but
+ * allow the user to increase this.
+ *
+ * SX8 should be able to support up to CARM_MAX_REQ (64) queued commands,
+ * but problems seem to occur when you exceed ~30, even on newer hardware.
+ */
+static int max_queue = 1;
+module_param(max_queue, int, 0444);
+MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
+
+
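The MODULE_PARM_DESC above documents a 1..30 range for max_queue, but nothing in the patch itself enforces it; an out-of-range value would be used directly to index the per-host request array and the 64-bit msg_alloc bitmap in carm_get_request(). A minimal sketch of how the module init path could clamp the value before registering the driver is shown below -- illustrative only, not part of this patch (the patched carm_init() simply calls pci_register_driver()):

/*
 * Illustrative sketch, not part of this patch: clamp the user-supplied
 * queue depth to the documented 1..30 range before request tags are
 * handed out based on it.
 */
static int __init carm_init(void)
{
        if (max_queue < 1)
                max_queue = 1;
        else if (max_queue > 30)
                max_queue = 30;

        return pci_register_driver(&carm_driver);
}

Falling back to the safe default of 1 on bad input would also match the spirit of the comment above; either choice keeps the (1ULL << i) shifts in carm_get_request() well-defined.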
#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN)
/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
/* command message queue limits */
CARM_MAX_REQ = 64, /* max command msgs per host */
- CARM_MAX_Q = 1, /* one command at a time */
CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */
/* S/G limits, host-wide and per-request */
CARM_MAX_REQ_SG = 32, /* max s/g entries per request */
- CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */
CARM_MAX_HOST_SG = 600, /* max s/g entries per host */
CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */
FL_DYN_MAJOR = (1 << 17),
};
+enum {
+ CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */
+};
+
enum scatter_gather_types {
SGT_32BIT = 0,
SGT_64BIT = 1,
struct carm_port {
unsigned int port_no;
- unsigned int n_queued;
struct gendisk *disk;
struct carm_host *host;
struct carm_host {
unsigned long flags;
- void *mmio;
+ void __iomem *mmio;
void *shm;
dma_addr_t shm_dma;
struct work_struct fsm_task;
- struct semaphore probe_sem;
+ struct completion probe_comp;
};
struct carm_response {
- u32 ret_handle;
- u32 status;
+ __le32 ret_handle;
+ __le32 status;
} __attribute__((packed));
struct carm_msg_sg {
- u32 start;
- u32 len;
+ __le32 start;
+ __le32 len;
} __attribute__((packed));
struct carm_msg_rw {
u8 id;
u8 sg_count;
u8 sg_type;
- u32 handle;
- u32 lba;
- u16 lba_count;
- u16 lba_high;
+ __le32 handle;
+ __le32 lba;
+ __le16 lba_count;
+ __le16 lba_high;
struct carm_msg_sg sg[32];
} __attribute__((packed));
u8 subtype;
u8 n_sg;
u8 sg_type;
- u32 handle;
- u32 addr;
- u32 len;
- u32 evt_pool;
- u32 n_evt;
- u32 rbuf_pool;
- u32 n_rbuf;
- u32 msg_pool;
- u32 n_msg;
+ __le32 handle;
+ __le32 addr;
+ __le32 len;
+ __le32 evt_pool;
+ __le32 n_evt;
+ __le32 rbuf_pool;
+ __le32 n_rbuf;
+ __le32 msg_pool;
+ __le32 n_msg;
struct carm_msg_sg sg[8];
} __attribute__((packed));
u8 subtype;
u8 array_id;
u8 reserved1;
- u32 handle;
- u32 data_addr;
+ __le32 handle;
+ __le32 data_addr;
u32 reserved2;
} __attribute__((packed));
u8 type;
u8 subtype;
u16 reserved1;
- u32 handle;
+ __le32 handle;
u32 reserved2;
- u32 timestamp;
+ __le32 timestamp;
} __attribute__((packed));
struct carm_msg_get_fw_ver {
u8 type;
u8 subtype;
u16 reserved1;
- u32 handle;
- u32 data_addr;
+ __le32 handle;
+ __le32 data_addr;
u32 reserved2;
} __attribute__((packed));
struct carm_fw_ver {
- u32 version;
+ __le32 version;
u8 features;
u8 reserved1;
u16 reserved2;
} __attribute__((packed));
struct carm_array_info {
- u32 size;
+ __le32 size;
- u16 size_hi;
- u16 stripe_size;
+ __le16 size_hi;
+ __le16 stripe_size;
- u32 mode;
+ __le32 mode;
- u16 stripe_blk_sz;
- u16 reserved1;
+ __le16 stripe_blk_sz;
+ __le16 reserved1;
- u16 cyl;
- u16 head;
+ __le16 cyl;
+ __le16 head;
- u16 sect;
+ __le16 sect;
u8 array_id;
u8 reserved2;
char name[40];
- u32 array_status;
+ __le32 array_status;
/* device list continues beyond this point? */
} __attribute__((packed));
static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void carm_remove_one (struct pci_dev *pdev);
-static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
- unsigned int cmd, unsigned long arg);
+static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static struct pci_device_id carm_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
static struct block_device_operations carm_bd_ops = {
.owner = THIS_MODULE,
- .ioctl = carm_bdev_ioctl,
+ .getgeo = carm_bdev_getgeo,
};
static unsigned int carm_host_id;
-static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
- unsigned int cmd, unsigned long arg)
+static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- void __user *usermem = (void __user *) arg;
- struct carm_port *port = ino->i_bdev->bd_disk->private_data;
- struct hd_geometry geom;
-
- switch (cmd) {
- case HDIO_GETGEO:
- if (!usermem)
- return -EINVAL;
-
- geom.heads = (u8) port->dev_geom_head;
- geom.sectors = (u8) port->dev_geom_sect;
- geom.cylinders = port->dev_geom_cyl;
- geom.start = get_start_sect(ino->i_bdev);
-
- if (copy_to_user(usermem, &geom, sizeof(geom)))
- return -EFAULT;
- return 0;
+ struct carm_port *port = bdev->bd_disk->private_data;
- default:
- break;
- }
-
- return -EOPNOTSUPP;
+
+ geo->heads = (u8) port->dev_geom_head;
+ geo->sectors = (u8) port->dev_geom_sect;
+ geo->cylinders = port->dev_geom_cyl;
+ return 0;
}
static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
if (msg_size <= msg_sizes[i])
return i;
-
+
return -ENOENT;
}
-static void carm_init_buckets(void *mmio)
+static void carm_init_buckets(void __iomem *mmio)
{
unsigned int i;
static int carm_send_msg(struct carm_host *host,
struct carm_request *crq)
{
- void *mmio = host->mmio;
+ void __iomem *mmio = host->mmio;
u32 msg = (u32) carm_ref_msg_dma(host, crq->tag);
u32 cm_bucket = crq->msg_bucket;
u32 tmp;
if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG))
return NULL;
- for (i = 0; i < CARM_MAX_Q; i++)
+ for (i = 0; i < max_queue; i++)
if ((host->msg_alloc & (1ULL << i)) == 0) {
struct carm_request *crq = &host->req[i];
crq->port = NULL;
assert(host->n_msgs <= CARM_MAX_REQ);
return crq;
}
-
+
DPRINTK("no request available, returning NULL\n");
return NULL;
}
static int carm_put_request(struct carm_host *host, struct carm_request *crq)
{
- assert(crq->tag < CARM_MAX_Q);
+ assert(crq->tag < max_queue);
if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0))
return -EINVAL; /* tried to clear a tag that was not active */
spin_unlock_irq(&host->lock);
DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
+ blk_insert_request(host->oob_q, crq->rq, 1, crq);
return 0;
crq->msg_bucket = (u32) rc;
DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
+ blk_insert_request(host->oob_q, crq->rq, 1, crq);
return 0;
}
rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
assert(rc == 0);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
rc = carm_put_request(host, crq);
assert(rc == 0);
int is_ok)
{
carm_end_request_queued(host, crq, is_ok);
- if (CARM_MAX_Q == 1)
+ if (max_queue == 1)
carm_round_robin(host);
else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
(host->hw_sg_used <= CARM_SG_LOW_WATER)) {
port = &host->port[cur_port];
lo = (u64) le32_to_cpu(desc->size);
- hi = (u64) le32_to_cpu(desc->size_hi);
+ hi = (u64) le16_to_cpu(desc->size_hi);
port->capacity = lo | (hi << 32);
port->dev_geom_head = le16_to_cpu(desc->head);
}
static inline void carm_handle_resp(struct carm_host *host,
- u32 ret_handle_le, u32 status)
+ __le32 ret_handle_le, u32 status)
{
u32 handle = le32_to_cpu(ret_handle_le);
unsigned int msg_idx;
static inline void carm_handle_responses(struct carm_host *host)
{
- void *mmio = host->mmio;
+ void __iomem *mmio = host->mmio;
struct carm_response *resp = (struct carm_response *) host->shm;
unsigned int work = 0;
unsigned int idx = host->resp_idx % RMSG_Q_LEN;
else if ((status & (1 << 31)) == 0) {
VPRINTK("handling msg response on index %u\n", idx);
carm_handle_resp(host, resp[idx].ret_handle, status);
- resp[idx].status = 0xffffffff;
+ resp[idx].status = cpu_to_le32(0xffffffff);
}
/* asynchronous events the hardware throws our way */
u8 evt_type = *evt_type_ptr;
printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
pci_name(host->pdev), (int) evt_type);
- resp[idx].status = 0xffffffff;
+ resp[idx].status = cpu_to_le32(0xffffffff);
}
idx = NEXT_RESP(idx);
host->resp_idx += work;
}
-static irqreturn_t carm_interrupt(int irq, void *__host, struct pt_regs *regs)
+static irqreturn_t carm_interrupt(int irq, void *__host)
{
struct carm_host *host = __host;
- void *mmio;
+ void __iomem *mmio;
u32 mask;
int handled = 0;
unsigned long flags;
return IRQ_RETVAL(handled);
}
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
{
- struct carm_host *host = _data;
+ struct carm_host *host =
+ container_of(work, struct carm_host, fsm_task);
unsigned long flags;
unsigned int state;
int rc, i, next_dev;
}
case HST_PROBE_FINISHED:
- up(&host->probe_sem);
+ complete(&host->probe_comp);
break;
case HST_ERROR:
schedule_work(&host->fsm_task);
}
-static int carm_init_wait(void *mmio, u32 bits, unsigned int test_bit)
+static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
{
unsigned int i;
static void carm_init_responses(struct carm_host *host)
{
- void *mmio = host->mmio;
+ void __iomem *mmio = host->mmio;
unsigned int i;
struct carm_response *resp = (struct carm_response *) host->shm;
for (i = 0; i < RMSG_Q_LEN; i++)
- resp[i].status = 0xffffffff;
+ resp[i].status = cpu_to_le32(0xffffffff);
writel(0, mmio + CARM_RESP_IDX);
}
static int carm_init_host(struct carm_host *host)
{
- void *mmio = host->mmio;
+ void __iomem *mmio = host->mmio;
u32 tmp;
u8 tmp8;
int rc;
tmp8 = readb(mmio + CARM_INITC);
if (tmp8 & 0x01) {
tmp8 &= ~0x01;
- writeb(tmp8, CARM_INITC);
+ writeb(tmp8, mmio + CARM_INITC);
readb(mmio + CARM_INITC); /* flush */
DPRINTK("snooze...\n");
}
port->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "%u_%u", host->id, i);
- sprintf(disk->devfs_name, DRV_NAME "/%u_%u", host->id, i);
+ sprintf(disk->disk_name, DRV_NAME "/%u",
+ (unsigned int) (host->id * CARM_MAX_PORTS) + i);
disk->major = host->major;
disk->first_minor = i * CARM_MINORS_PER_MAJOR;
disk->fops = &carm_bd_ops;
if (rc)
goto err_out;
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
- rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+ rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (!rc) {
- rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
+ rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
pci_name(pdev));
pci_dac = 1;
} else {
#endif
- rc = pci_set_dma_mask(pdev, 0xffffffffULL);
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
pci_name(pdev));
goto err_out_regions;
}
pci_dac = 0;
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
}
#endif
host->pdev = pdev;
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
- INIT_WORK(&host->fsm_task, carm_fsm_task, host);
- init_MUTEX_LOCKED(&host->probe_sem);
+ INIT_WORK(&host->fsm_task, carm_fsm_task);
+ init_completion(&host->probe_comp);
for (i = 0; i < ARRAY_SIZE(host->req); i++)
host->req[i].tag = i;
if (host->flags & FL_DYN_MAJOR)
host->major = rc;
- devfs_mk_dir(DRV_NAME);
-
rc = carm_init_disks(host);
if (rc)
goto err_out_blkdev_disks;
pci_set_master(pdev);
- rc = request_irq(pdev->irq, carm_interrupt, SA_SHIRQ, DRV_NAME, host);
+ rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
pci_name(pdev));
if (rc)
goto err_out_free_irq;
- DPRINTK("waiting for probe_sem\n");
- down(&host->probe_sem);
+ DPRINTK("waiting for probe_comp\n");
+ wait_for_completion(&host->probe_comp);
- printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n",
+ printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
- pci_resource_start(pdev, 0), pdev->irq, host->major);
+ (unsigned long long)pci_resource_start(pdev, 0),
+ pdev->irq, host->major);
carm_host_id++;
pci_set_drvdata(pdev, host);
free_irq(pdev->irq, host);
carm_free_disks(host);
- devfs_remove(DRV_NAME);
unregister_blkdev(host->major, host->name);
if (host->major == 160)
clear_bit(0, &carm_major_alloc);
static int __init carm_init(void)
{
- return pci_module_init(&carm_driver);
+ return pci_register_driver(&carm_driver);
}
static void __exit carm_exit(void)