-------------------
begin : Thu Sep 7 2000
copyright : (C) 2000 by Adaptec
- email : deanna_bonds@adaptec.com
July 30, 2001 First version being submitted
for inclusion in the kernel. V2.4
* (at your option) any later version. *
* *
***************************************************************************/
+/***************************************************************************
+ * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
+ - Support 2.6 kernel and DMA-mapping
+ - ioctl fix for raid tools
+ - use schedule_timeout in long long loop
+ **************************************************************************/
-//#define DEBUG 1
-//#define UARTDELAY 1
+/*#define DEBUG 1 */
+/*#define UARTDELAY 1 */
-// On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
-// high pages. Keep the macro around because of the broken unmerged ia64 tree
+/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
+ high pages. Keep the macro around because of the broken unmerged ia64 tree */
#define ADDR32 (0)
-#error Please convert me to Documentation/DMA-mapping.txt
-
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h> /* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
+#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
-#include <linux/stat.h>
#include <asm/processor.h> /* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h> /* for virt_to_bus, etc. */
-#include "scsi.h"
-#include "hosts.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "dpt/dptsig.h"
#include "dpti.h"
#elif defined(__ia64__)
PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
- PROC_ULTRASPARC,
+ PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
- PROC_ALPHA ,
+ PROC_ALPHA, PROC_ALPHA,
#else
- (-1),(-1)
+ (-1),(-1),
#endif
FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
-static spinlock_t adpt_post_wait_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(adpt_post_wait_lock);
/*============================================================================
};
MODULE_DEVICE_TABLE(pci,dptids);
-static int adpt_detect(Scsi_Host_Template* sht)
+static int adpt_detect(struct scsi_host_template* sht)
{
struct pci_dev *pDev = NULL;
adpt_hba* pHba;
/* Active IOPs now in OPERATIONAL state */
PDEBUG("HBA's in OPERATIONAL state\n");
- printk(KERN_INFO"dpti: If you have a lot of devices this could take a few minutes.\n");
+ printk("dpti: If you have a lot of devices this could take a few minutes.\n");
for (pHba = hba_chain; pHba; pHba = pHba->next) {
printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
if (adpt_i2o_lct_get(pHba) < 0){
adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
// adpt_i2o_quiesce_hba(pHba);
adpt_i2o_delete_hba(pHba);
+ scsi_unregister(host);
return 0;
}
if (rcode != 0) {
sprintf(pHba->detail, "Adaptec I2O RAID");
printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
+ if (rcode != -ETIME && rcode != -EINTR)
+ kfree(buf);
} else {
memset(pHba->detail, 0, sizeof(pHba->detail));
memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
memcpy(&(pHba->detail[40]), " FW: ", 4);
memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
pHba->detail[48] = '\0'; /* precautionary */
+ kfree(buf);
}
- kfree(buf);
adpt_i2o_status_get(pHba);
return ;
}
-static int adpt_slave_configure(Scsi_Device * device)
+static int adpt_slave_configure(struct scsi_device * device)
{
struct Scsi_Host *host = device->host;
adpt_hba* pHba;
return 0;
}
-static int adpt_queue(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
adpt_hba* pHba = NULL;
struct adpt_device* pDev = NULL; /* dpt per device information */
cmd->scsi_done(cmd);
return 0;
}
- (struct adpt_device*)(cmd->device->hostdata) = pDev;
+ cmd->device->hostdata = pDev;
}
pDev->pScsi_dev = cmd->device;
heads = 255;
sectors = 63;
}
- cylinders = capacity / (heads * sectors);
+ cylinders = sector_div(capacity, heads * sectors);
// Special case if CDROM
if(sdev->type == 5) { // CDROM
*===========================================================================
*/
-static int adpt_abort(Scsi_Cmnd * cmd)
+static int adpt_abort(struct scsi_cmnd * cmd)
{
adpt_hba* pHba = NULL; /* host bus adapter structure */
struct adpt_device* dptdevice; /* dpt per device information */
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
-static int adpt_device_reset(Scsi_Cmnd* cmd)
+static int adpt_device_reset(struct scsi_cmnd* cmd)
{
adpt_hba* pHba;
u32 msg[4];
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
-static int adpt_bus_reset(Scsi_Cmnd* cmd)
+static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
adpt_hba* pHba;
u32 msg[4];
}
// This version of reset is called by the eh_error_handler
-static int adpt_reset(Scsi_Cmnd* cmd)
+static int adpt_reset(struct scsi_cmnd* cmd)
{
adpt_hba* pHba;
int rcode;
kfree(p1);
}
// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
- adpt_post_wait_queue = 0;
+ adpt_post_wait_queue = NULL;
printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
#endif
-static int adpt_install_hba(Scsi_Host_Template* sht, struct pci_dev* pDev)
+static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
adpt_hba* pHba = NULL;
ulong base_addr1_phys = 0;
u32 hba_map0_area_size = 0;
u32 hba_map1_area_size = 0;
- ulong base_addr_virt = 0;
- ulong msg_addr_virt = 0;
+ void __iomem *base_addr_virt = NULL;
+ void __iomem *msg_addr_virt = NULL;
int raptorFlag = FALSE;
int i;
return -EINVAL;
}
pci_set_master(pDev);
+ if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) &&
+ pci_set_dma_mask(pDev, 0xffffffffULL))
+ return -EINVAL;
base_addr0_phys = pci_resource_start(pDev,0);
hba_map0_area_size = pci_resource_len(pDev,0);
}
- base_addr_virt = (ulong)ioremap(base_addr0_phys,hba_map0_area_size);
- if(base_addr_virt == 0) {
+ base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
+ if (!base_addr_virt) {
PERROR("dpti: adpt_config_hba: io remap failed\n");
return -EINVAL;
}
if(raptorFlag == TRUE) {
- msg_addr_virt = (ulong)ioremap(base_addr1_phys, hba_map1_area_size );
- if(msg_addr_virt == 0) {
+ msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
+ if (!msg_addr_virt) {
PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
- iounmap((void*)base_addr_virt);
+ iounmap(base_addr_virt);
return -EINVAL;
}
} else {
pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
if( pHba == NULL) {
if(msg_addr_virt != base_addr_virt){
- iounmap((void*)msg_addr_virt);
+ iounmap(msg_addr_virt);
}
- iounmap((void*)base_addr_virt);
+ iounmap(base_addr_virt);
return -ENOMEM;
}
memset(pHba, 0, sizeof(adpt_hba));
// Set up the Virtual Base Address of the I2O Device
pHba->base_addr_virt = base_addr_virt;
- pHba->msg_addr_virt = msg_addr_virt;
- pHba->irq_mask = (ulong)(base_addr_virt+0x30);
- pHba->post_port = (ulong)(base_addr_virt+0x40);
- pHba->reply_port = (ulong)(base_addr_virt+0x44);
+ pHba->msg_addr_virt = msg_addr_virt;
+ pHba->irq_mask = base_addr_virt+0x30;
+ pHba->post_port = base_addr_virt+0x40;
+ pHba->reply_port = base_addr_virt+0x44;
pHba->hrt = NULL;
pHba->lct = NULL;
// Initializing the spinlocks
spin_lock_init(&pHba->state_lock);
+ spin_lock_init(&adpt_post_wait_lock);
if(raptorFlag == 0){
- printk(KERN_INFO"Adaptec I2O RAID controller %d at %lx size=%x irq=%d\n",
+ printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
} else {
printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
- printk(KERN_INFO" BAR0 %lx - size= %x\n",base_addr_virt,hba_map0_area_size);
- printk(KERN_INFO" BAR1 %lx - size= %x\n",msg_addr_virt,hba_map1_area_size);
+ printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
+ printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
}
if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) {
hba_count--;
up(&adpt_configuration_lock);
- iounmap((void*)pHba->base_addr_virt);
+ iounmap(pHba->base_addr_virt);
if(pHba->msg_addr_virt != pHba->base_addr_virt){
- iounmap((void*)pHba->msg_addr_virt);
+ iounmap(pHba->msg_addr_virt);
}
if(pHba->hrt) {
kfree(pHba->hrt);
{
int i;
- printk(KERN_INFO"Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
+ printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
for (i = 0; i < DPTI_MAX_HBA; i++) {
hbas[i] = NULL;
}
timeout *= HZ;
if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(pHba->host->host_lock);
+ if(pHba->host)
+ spin_unlock_irq(pHba->host->host_lock);
if (!timeout)
schedule();
- else
- schedule_timeout(timeout*HZ);
- spin_lock_irq(pHba->host->host_lock);
+ else{
+ timeout = schedule_timeout(timeout);
+ if (timeout == 0) {
+ // I/O issued, but cannot get result in
+ // specified time. Freeing resources is
+ // dangerous.
+ status = -ETIME;
+ }
+ }
+ if(pHba->host)
+ spin_lock_irq(pHba->host->host_lock);
}
spin_lock_irq(&adpt_wq_i2o_post.lock);
__remove_wait_queue(&adpt_wq_i2o_post, &wait);
{
u32 m = EMPTY_QUEUE;
- u32 *msg;
+ u32 __iomem *msg;
ulong timeout = jiffies + 30*HZ;
do {
rmb();
printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
return -ETIMEDOUT;
}
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
} while(m == EMPTY_QUEUE);
- msg = (u32*) (pHba->msg_addr_virt + m);
+ msg = pHba->msg_addr_virt + m;
memcpy_toio(msg, data, len);
wmb();
printk(KERN_WARNING"Timeout waiting for message!\n");
return -ETIMEDOUT;
}
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
} while (m == EMPTY_QUEUE);
status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
return -ETIMEDOUT;
}
rmb();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
}
if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
return -ETIMEDOUT;
}
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
} while (m == EMPTY_QUEUE);
// Flush the offset
adpt_send_nop(pHba, m);
}
-static int adpt_i2o_passthru(adpt_hba* pHba, u32* arg)
+static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
u32 msg[MAX_MESSAGE_SIZE];
u32* reply = NULL;
u32 size = 0;
u32 reply_size = 0;
- u32* user_msg = (u32*)arg;
- u32* user_reply = NULL;
- ulong sg_list[pHba->sg_tablesize];
+ u32 __user *user_msg = arg;
+ u32 __user * user_reply = NULL;
+ void *sg_list[pHba->sg_tablesize];
u32 sg_offset = 0;
u32 sg_count = 0;
int sg_index = 0;
u32 i = 0;
u32 rcode = 0;
- ulong p = 0;
+ void *p = NULL;
ulong flags = 0;
memset(&msg, 0, MAX_MESSAGE_SIZE*4);
size *= 4; // Convert to bytes
/* Copy in the user's I2O command */
- if(copy_from_user((void*)msg, (void*)user_msg, size)) {
+ if(copy_from_user(msg, user_msg, size)) {
return -EFAULT;
}
get_user(reply_size, &user_reply[0]);
}
sg_size = sg[i].flag_count & 0xffffff;
/* Allocate memory for the transfer */
- p = (ulong)kmalloc(sg_size, GFP_KERNEL|ADDR32);
- if(p == 0) {
+ p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
+ if(!p) {
printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
pHba->name,sg_size,i,sg_count);
rcode = -ENOMEM;
/* Copy in the user's SG buffer if necessary */
if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
// TODO 64bit fix
- if (copy_from_user((void*)p,(void*)sg[i].addr_bus, sg_size)) {
+ if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
rcode = -EFAULT;
goto cleanup;
}
}
//TODO 64bit fix
- sg[i].addr_bus = (u32)virt_to_bus((void*)p);
+ sg[i].addr_bus = (u32)virt_to_bus(p);
}
}
do {
- spin_lock_irqsave(pHba->host->host_lock, flags);
+ if(pHba->host)
+ spin_lock_irqsave(pHba->host->host_lock, flags);
// This state stops any new commands from enterring the
// controller while processing the ioctl
// pHba->state |= DPTI_STATE_IOCTL;
// We can't set this now - The scsi subsystem sets host_blocked and
// the queue empties and stops. We need a way to restart the queue
rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
+ if (rcode != 0)
+ printk("adpt_i2o_passthru: post wait failed %d %p\n",
+ rcode, reply);
// pHba->state &= ~DPTI_STATE_IOCTL;
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ if(pHba->host)
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
} while(rcode == -ETIMEDOUT);
if(rcode){
size = size>>16;
size *= 4;
/* Copy in the user's I2O command */
- if (copy_from_user ((void*)msg, (void*)user_msg, size)) {
+ if (copy_from_user (msg, user_msg, size)) {
rcode = -EFAULT;
goto cleanup;
}
if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
sg_size = sg[j].flag_count & 0xffffff;
// TODO 64bit fix
- if (copy_to_user((void*)sg[j].addr_bus,(void*)sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %lx TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
+ if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
+ printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
rcode = -EFAULT;
goto cleanup;
}
cleanup:
- kfree (reply);
+ if (rcode != -ETIME && rcode != -EINTR)
+ kfree (reply);
while(sg_index) {
if(sg_list[--sg_index]) {
- kfree((void*)(sg_list[sg_index]));
+ if (rcode != -ETIME && rcode != -EINTR)
+ kfree(sg_list[sg_index]);
}
}
return rcode;
*/
/* Get all the info we can not get from kernel services */
-static int adpt_system_info(void *buffer)
+static int adpt_system_info(void __user *buffer)
{
sysInfo_S si;
int minor;
int error = 0;
adpt_hba* pHba;
- ulong flags;
+ ulong flags = 0;
+ void __user *argp = (void __user *)arg;
minor = iminor(inode);
if (minor >= DPTI_MAX_HBA){
switch (cmd) {
// TODO: handle 3 cases
case DPT_SIGNATURE:
- if (copy_to_user((char*)arg, &DPTI_sig, sizeof(DPTI_sig))) {
+ if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
return -EFAULT;
}
break;
case I2OUSRCMD:
- return adpt_i2o_passthru(pHba,(u32*)arg);
- break;
+ return adpt_i2o_passthru(pHba, argp);
case DPT_CTRLINFO:{
drvrHBAinfo_S HbaInfo;
HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
HbaInfo.Interrupt = pHba->pDev->irq;
HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
- if(copy_to_user((void *) arg, &HbaInfo, sizeof(HbaInfo))){
+ if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
return -EFAULT;
}
break;
}
case DPT_SYSINFO:
- return adpt_system_info((void*)arg);
- break;
+ return adpt_system_info(argp);
case DPT_BLINKLED:{
u32 value;
value = (u32)adpt_read_blink_led(pHba);
- if (copy_to_user((char*)arg, &value, sizeof(value))) {
+ if (copy_to_user(argp, &value, sizeof(value))) {
return -EFAULT;
}
break;
}
case I2ORESETCMD:
- spin_lock_irqsave(pHba->host->host_lock, flags);
+ if(pHba->host)
+ spin_lock_irqsave(pHba->host->host_lock, flags);
adpt_hba_reset(pHba);
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ if(pHba->host)
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
break;
case I2ORESCANCMD:
adpt_rescan(pHba);
}
-static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
{
- Scsi_Cmnd* cmd;
+ struct scsi_cmnd* cmd;
adpt_hba* pHba = dev_id;
u32 m;
ulong reply;
u32 status=0;
u32 context;
ulong flags = 0;
+ int handled = 0;
- if (pHba == NULL ){
+ if (pHba == NULL){
printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
- return;
+ return IRQ_NONE;
}
- spin_lock_irqsave(pHba->host->host_lock, flags);
+ if(pHba->host)
+ spin_lock_irqsave(pHba->host->host_lock, flags);
+
while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
m = readl(pHba->reply_port);
if(m == EMPTY_QUEUE){
status = I2O_POST_WAIT_OK;
}
if(!(context & 0x40000000)) {
- cmd = (Scsi_Cmnd*) readl(reply+12);
+ cmd = (struct scsi_cmnd*) readl(reply+12);
if(cmd != NULL) {
printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
}
}
adpt_i2o_post_wait_complete(context, status);
} else { // SCSI message
- cmd = (Scsi_Cmnd*) readl(reply+12);
+ cmd = (struct scsi_cmnd*) readl(reply+12);
if(cmd != NULL){
if(cmd->serial_number != 0) { // If not timedout
adpt_i2o_to_scsi(reply, cmd);
wmb();
rmb();
}
-out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ handled = 1;
+out: if(pHba->host)
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ return IRQ_RETVAL(handled);
}
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, Scsi_Cmnd* cmd, struct adpt_device* d)
+static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
int i;
u32 msg[MAX_MESSAGE_SIZE];
* cmd->cmnd[0] is an unsigned char
*/
switch(cmd->sc_data_direction){
- case SCSI_DATA_READ:
+ case DMA_FROM_DEVICE:
scsidir =0x40000000; // DATA IN (iop<--dev)
break;
- case SCSI_DATA_WRITE:
+ case DMA_TO_DEVICE:
direction=0x04000000; // SGL OUT
scsidir =0x80000000; // DATA OUT (iop-->dev)
break;
- case SCSI_DATA_NONE:
+ case DMA_NONE:
break;
- case SCSI_DATA_UNKNOWN:
+ case DMA_BIDIRECTIONAL:
scsidir =0x40000000; // DATA IN (iop<--dev)
// Assume In - and continue;
break;
/* Now fill in the SGList and command */
if(cmd->use_sg) {
struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
+ int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
+ cmd->sc_data_direction);
+
+
len = 0;
- for(i = 0 ; i < cmd->use_sg; i++) {
- *mptr++ = direction|0x10000000|sg->length;
- len+=sg->length;
- *mptr++ = virt_to_bus(sg->address);
+ for(i = 0 ; i < sg_count; i++) {
+ *mptr++ = direction|0x10000000|sg_dma_len(sg);
+ len+=sg_dma_len(sg);
+ *mptr++ = sg_dma_address(sg);
sg++;
}
/* Make this an end of list */
- mptr[-2] = direction|0xD0000000|(sg-1)->length;
+ mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
reqlen = mptr - msg;
*lenptr = len;
reqlen = 12;
} else {
*mptr++ = 0xD0000000|direction|cmd->request_bufflen;
- *mptr++ = virt_to_bus(cmd->request_buffer);
+ *mptr++ = pci_map_single(pHba->pDev,
+ cmd->request_buffer,
+ cmd->request_bufflen,
+ cmd->sc_data_direction);
}
}
}
-static s32 adpt_scsi_register(adpt_hba* pHba,Scsi_Host_Template * sht)
+static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
{
struct Scsi_Host *host = NULL;
printk ("%s: scsi_register returned NULL\n",pHba->name);
return -1;
}
- (adpt_hba*)(host->hostdata[0]) = pHba;
+ host->hostdata[0] = (unsigned long)pHba;
pHba->host = host;
host->irq = pHba->pDev->irq;
}
-static s32 adpt_i2o_to_scsi(ulong reply, Scsi_Cmnd* cmd)
+static s32 adpt_i2o_to_scsi(ulong reply, struct scsi_cmnd* cmd)
{
adpt_hba* pHba;
u32 hba_status;
static s32 adpt_rescan(adpt_hba* pHba)
{
s32 rcode;
- ulong flags;
+ ulong flags = 0;
- spin_lock_irqsave(pHba->host->host_lock, flags);
+ if(pHba->host)
+ spin_lock_irqsave(pHba->host->host_lock, flags);
if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
goto out;
if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
goto out;
rcode = 0;
-out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
+out: if(pHba->host)
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
return rcode;
}
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
- Scsi_Cmnd* cmd = NULL;
- Scsi_Device* d = NULL;
+ struct scsi_cmnd* cmd = NULL;
+ struct scsi_device* d = NULL;
shost_for_each_device(d, pHba->host) {
unsigned long flags;
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
- u32 *msg;
+ u32 __iomem *msg;
ulong timeout = jiffies + 5*HZ;
while(m == EMPTY_QUEUE){
printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
return 2;
}
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
}
- msg = (u32*)(pHba->msg_addr_virt + m);
+ msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
writel( 0,&msg[2]);
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
u8 *status;
- u32 *msg = NULL;
+ u32 __iomem *msg = NULL;
int i;
ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
u32* ptr;
printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
return -ETIMEDOUT;
}
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
} while(m == EMPTY_QUEUE);
- msg=(u32 *)(pHba->msg_addr_virt+m);
+ msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
status = kmalloc(4,GFP_KERNEL|ADDR32);
if (status==NULL) {
rmb();
if(time_after(jiffies,timeout)){
printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
- kfree((void*)status);
return -ETIMEDOUT;
}
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
} while (1);
// If the command was successful, fill the fifo with our reply
{
ulong timeout;
u32 m;
- u32 *msg;
+ u32 __iomem *msg;
u8 *status_block=NULL;
ulong status_block_bus;
pHba->name);
return -ETIMEDOUT;
}
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
} while(m==EMPTY_QUEUE);
- msg=(u32*)(pHba->msg_addr_virt+m);
+ msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
return -ETIMEDOUT;
}
rmb();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
}
// Set up our number of outbound and inbound messages
int group, int field, void *buf, int buflen)
{
u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
- u8 resblk[8+buflen]; /* 8 bytes for header */
+ u8 *resblk;
+
int size;
+ /* 8 bytes for header */
+ resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
+ if (resblk == NULL) {
+ printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
+ return -ENOMEM;
+ }
+
if (field == -1) /* whole group */
opblk[4] = -1;
size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
- opblk, sizeof(opblk), resblk, sizeof(resblk));
+ opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
+ if (size == -ETIME) {
+ printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
+ return -ETIME;
+ } else if (size == -EINTR) {
+ printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
+ return -EINTR;
+ }
memcpy(buf, resblk+8, buflen); /* cut off header */
+ kfree(resblk);
if (size < 0)
return size;
msg[8] = virt_to_bus(resblk);
if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
+ printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
return wait_status; /* -DetailedStatus */
}
#endif
-static Scsi_Host_Template driver_template = {
+static struct scsi_host_template driver_template = {
.name = "dpt_i2o",
.proc_name = "dpt_i2o",
.proc_info = adpt_proc_info,