X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fdpt_i2o.c;h=365db537a28da81ddee1d7a1403a50ddc02a0383;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=77cf39f3b5c2abba4db8df69e6f5773143ef661c;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 77cf39f3b..365db537a 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -3,7 +3,6 @@
 			-------------------
     begin                : Thu Sep 7 2000
     copyright            : (C) 2000 by Adaptec
-    email                : deanna_bonds@adaptec.com
 
     July 30, 2001 First version being submitted for
 				 inclusion in the kernel.  V2.4
@@ -35,7 +34,6 @@
 
 #define ADDR32 (0)
 
-#include 
 #include 
 
 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
@@ -48,7 +46,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
 
 #include 
 #include 		/* for kmalloc() */
-#include 		/* for CONFIG_PCI */
 #include 		/* for PCI support */
 #include 
 #include 
@@ -59,10 +56,12 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 
+#include 
 
 #include 		/* for boot_cpu_data */
 #include 
@@ -108,13 +107,12 @@ static dpt_sig_S DPTI_sig = {
  *============================================================================
  */
 
-DECLARE_MUTEX(adpt_configuration_lock);
+static DEFINE_MUTEX(adpt_configuration_lock);
 
 static struct i2o_sys_tbl *sys_tbl = NULL;
 static int sys_tbl_ind = 0;
 static int sys_tbl_len = 0;
 
-static adpt_hba* hbas[DPTI_MAX_HBA];
 static adpt_hba* hba_chain = NULL;
 static int hba_count = 0;
 
@@ -146,7 +144,7 @@ struct adpt_i2o_post_wait_data
 
 static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
 static u32 adpt_post_wait_id = 0;
-static spinlock_t adpt_post_wait_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(adpt_post_wait_lock);
 
 
 /*============================================================================
@@ -186,7 +184,7 @@ static int adpt_detect(struct scsi_host_template* sht)
 	PINFO("Detecting Adaptec I2O RAID controllers...\n");
 
 	/* search for all Adatpec I2O RAID cards */
-	while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
+	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
 		if(pDev->device == PCI_DPT_DEVICE_ID ||
 		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
 			if(adpt_install_hba(sht, pDev) ){
@@ -194,8 +192,11 @@ static int adpt_detect(struct scsi_host_template* sht)
 				PERROR("Will not try to detect others.\n");
 				return hba_count-1;
 			}
+			pci_dev_get(pDev);
 		}
 	}
+	if (pDev)
+		pci_dev_put(pDev);
 
 	/* In INIT state, Activate IOPs */
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
@@ -296,7 +297,7 @@ static void adpt_inquiry(adpt_hba* pHba)
 	s32 rcode;
 
 	memset(msg, 0, sizeof(msg));
-	buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32);
+	buf = kmalloc(80,GFP_KERNEL|ADDR32);
 	if(!buf){
 		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
 		return;
@@ -385,7 +386,6 @@ static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
 {
 	adpt_hba* pHba = NULL;
 	struct adpt_device* pDev = NULL;	/* dpt per device information */
-	ulong timeout = jiffies + (TMOUT_SCSI*HZ);
 
 	cmd->scsi_done = done;
 	/*
@@ -421,11 +421,6 @@ static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
 		return 1;
 	}
 
-	if(cmd->eh_state != SCSI_STATE_QUEUED){
-		// If we are not doing error recovery
-		mod_timer(&cmd->eh_timeout, timeout);
-	}
-
 	// TODO if the cmd->device if offline then I may need to issue a bus rescan
 	// followed by a get_lct to see if the device is there anymore
 	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
@@ -546,13 +541,13 @@ static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, of
 	 */
 
 	// Find HBA (host bus adapter) we are looking for
-	down(&adpt_configuration_lock);
+	mutex_lock(&adpt_configuration_lock);
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 		if (pHba->host == host) {
 			break;	/* found adapter */
 		}
 	}
-	up(&adpt_configuration_lock);
+	mutex_unlock(&adpt_configuration_lock);
 	if (pHba == NULL) {
 		return 0;
 	}
@@ -669,7 +664,12 @@ static int adpt_abort(struct scsi_cmnd * cmd)
 	msg[2] = 0;
 	msg[3]= 0;
 	msg[4] = (u32)cmd;
-	if( (rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER)) != 0){
+	if (pHba->host)
+		spin_lock_irq(pHba->host->host_lock);
+	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
+	if (pHba->host)
+		spin_unlock_irq(pHba->host->host_lock);
+	if (rcode != 0) {
 		if(rcode == -EOPNOTSUPP ){
 			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
 			return FAILED;
@@ -692,7 +692,7 @@ static int adpt_device_reset(struct scsi_cmnd* cmd)
 	u32 msg[4];
 	u32 rcode;
 	int old_state;
-	struct adpt_device* d = (void*) cmd->device->hostdata;
+	struct adpt_device* d = cmd->device->hostdata;
 
 	pHba = (void*) cmd->device->host->hostdata[0];
 	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
@@ -706,10 +706,15 @@ static int adpt_device_reset(struct scsi_cmnd* cmd)
 	msg[2] = 0;
 	msg[3] = 0;
 
+	if (pHba->host)
+		spin_lock_irq(pHba->host->host_lock);
 	old_state = d->state;
 	d->state |= DPTI_DEV_RESET;
-	if( (rcode = adpt_i2o_post_wait(pHba, (void*)msg,sizeof(msg), FOREVER)) ){
-		d->state = old_state;
+	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
+	d->state = old_state;
+	if (pHba->host)
+		spin_unlock_irq(pHba->host->host_lock);
+	if (rcode != 0) {
 		if(rcode == -EOPNOTSUPP ){
 			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
 			return FAILED;
@@ -717,7 +722,6 @@
 		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
 		return FAILED;
 	} else {
-		d->state = old_state;
 		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
 		return SUCCESS;
 	}
@@ -730,6 +734,7 @@ static int adpt_bus_reset(struct scsi_cmnd* cmd)
 {
 	adpt_hba* pHba;
 	u32 msg[4];
+	u32 rcode;
 
 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 	memset(msg, 0, sizeof(msg));
@@ -738,7 +743,12 @@ static int adpt_bus_reset(struct scsi_cmnd* cmd)
 	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
 	msg[2] = 0;
 	msg[3] = 0;
-	if(adpt_i2o_post_wait(pHba, (void*)msg,sizeof(msg), FOREVER) ){
+	if (pHba->host)
+		spin_lock_irq(pHba->host->host_lock);
+	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
+	if (pHba->host)
+		spin_unlock_irq(pHba->host->host_lock);
+	if (rcode != 0) {
 		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
 		return FAILED;
 	} else {
@@ -748,7 +758,7 @@
 }
 
 // This version of reset is called by the eh_error_handler
-static int adpt_reset(struct scsi_cmnd* cmd)
+static int __adpt_reset(struct scsi_cmnd* cmd)
 {
 	adpt_hba* pHba;
 	int rcode;
@@ -764,6 +774,17 @@
 	}
 }
 
+static int adpt_reset(struct scsi_cmnd* cmd)
+{
+	int rc;
+
+	spin_lock_irq(cmd->device->host->host_lock);
+	rc = __adpt_reset(cmd);
+	spin_unlock_irq(cmd->device->host->host_lock);
+
+	return rc;
+}
+
 // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
 static int adpt_hba_reset(adpt_hba* pHba)
 {
@@ -814,7 +835,7 @@ static int adpt_hba_reset(adpt_hba* pHba)
 
 static void adpt_i2o_sys_shutdown(void)
 {
 	adpt_hba *pHba, *pNext;
-	struct adpt_i2o_post_wait_data *p1, *p2;
+	struct adpt_i2o_post_wait_data *p1, *old;
 	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
 	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
@@ -828,13 +849,14 @@ static void adpt_i2o_sys_shutdown(void)
 	}
 
 	/* Remove any timedout entries from the wait queue.  */
-	p2 = NULL;
 //	spin_lock_irqsave(&adpt_post_wait_lock, flags);
 	/* Nothing should be outstanding at this point so just
 	 * free them
 	 */
-	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p2->next) {
-		kfree(p1);
+	for(p1 = adpt_post_wait_queue; p1;) {
+		old = p1;
+		p1 = p1->next;
+		kfree(old);
 	}
 //	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
 	adpt_post_wait_queue = NULL;
@@ -872,18 +894,23 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	ulong base_addr1_phys = 0;
 	u32 hba_map0_area_size = 0;
 	u32 hba_map1_area_size = 0;
-	ulong base_addr_virt = 0;
-	ulong msg_addr_virt = 0;
+	void __iomem *base_addr_virt = NULL;
+	void __iomem *msg_addr_virt = NULL;
 
 	int raptorFlag = FALSE;
-	int i;
 
 	if(pci_enable_device(pDev)) {
 		return -EINVAL;
 	}
+
+	if (pci_request_regions(pDev, "dpt_i2o")) {
+		PERROR("dpti: adpt_config_hba: pci request region failed\n");
+		return -EINVAL;
+	}
+
 	pci_set_master(pDev);
-	if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) &&
-	    pci_set_dma_mask(pDev, 0xffffffffULL))
+	if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) &&
+	    pci_set_dma_mask(pDev, DMA_32BIT_MASK))
 		return -EINVAL;
 
 	base_addr0_phys = pci_resource_start(pDev,0);
@@ -906,18 +933,19 @@
 		raptorFlag = TRUE;
 	}
 
-
-	base_addr_virt = (ulong)ioremap(base_addr0_phys,hba_map0_area_size);
-	if(base_addr_virt == 0) {
+	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
+	if (!base_addr_virt) {
+		pci_release_regions(pDev);
 		PERROR("dpti: adpt_config_hba: io remap failed\n");
 		return -EINVAL;
 	}
 
 	if(raptorFlag == TRUE) {
-		msg_addr_virt = (ulong)ioremap(base_addr1_phys, hba_map1_area_size );
-		if(msg_addr_virt == 0) {
+		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
+		if (!msg_addr_virt) {
 			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
-			iounmap((void*)base_addr_virt);
+			iounmap(base_addr_virt);
+			pci_release_regions(pDev);
 			return -EINVAL;
 		}
 	} else {
@@ -928,20 +956,15 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
 	if( pHba == NULL) {
 		if(msg_addr_virt != base_addr_virt){
-			iounmap((void*)msg_addr_virt);
+			iounmap(msg_addr_virt);
 		}
-		iounmap((void*)base_addr_virt);
+		iounmap(base_addr_virt);
+		pci_release_regions(pDev);
 		return -ENOMEM;
 	}
 	memset(pHba, 0, sizeof(adpt_hba));
 
-	down(&adpt_configuration_lock);
-	for(i=0;i<DPTI_MAX_HBA;i++) {
-		if(hbas[i]==NULL) {
-			hbas[i]=pHba;
-			break;
-		}
-	}
+	mutex_lock(&adpt_configuration_lock);
 
 	if(hba_chain != NULL){
 		for(p = hba_chain; p->next; p = p->next);
@@ -951,20 +974,20 @@
 	}
 	pHba->next = NULL;
 	pHba->unit = hba_count;
-	sprintf(pHba->name, "dpti%d", i);
+	sprintf(pHba->name, "dpti%d", hba_count);
 	hba_count++;
-	up(&adpt_configuration_lock);
+	mutex_unlock(&adpt_configuration_lock);
 
 	pHba->pDev = pDev;
 	pHba->base_addr_phys = base_addr0_phys;
 
 	// Set up the Virtual Base Address of the I2O Device
 	pHba->base_addr_virt = base_addr_virt;
-	pHba->msg_addr_virt = msg_addr_virt;
-	pHba->irq_mask = (ulong)(base_addr_virt+0x30);
-	pHba->post_port = (ulong)(base_addr_virt+0x40);
-	pHba->reply_port = (ulong)(base_addr_virt+0x44);
+	pHba->msg_addr_virt = msg_addr_virt;
+	pHba->irq_mask = base_addr_virt+0x30;
+	pHba->post_port = base_addr_virt+0x40;
+	pHba->reply_port = base_addr_virt+0x44;
 
 	pHba->hrt = NULL;
 	pHba->lct = NULL;
@@ -980,15 +1003,15 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	spin_lock_init(&adpt_post_wait_lock);
 
 	if(raptorFlag == 0){
-		printk(KERN_INFO"Adaptec I2O RAID controller %d at %lx size=%x irq=%d\n",
+		printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
 			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
 	} else {
 		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
-		printk(KERN_INFO"     BAR0 %lx - size= %x\n",base_addr_virt,hba_map0_area_size);
-		printk(KERN_INFO"     BAR1 %lx - size= %x\n",msg_addr_virt,hba_map1_area_size);
+		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
+		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
 	}
 
-	if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) {
+	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
 		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
 		adpt_i2o_delete_hba(pHba);
 		return -EINVAL;
@@ -1010,17 +1033,12 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
 	struct adpt_device* pNext;
 
 
-	down(&adpt_configuration_lock);
+	mutex_lock(&adpt_configuration_lock);
 	// scsi_unregister calls our adpt_release which
 	// does a quiese
 	if(pHba->host){
 		free_irq(pHba->host->irq, pHba);
 	}
-	for(i=0;i<DPTI_MAX_HBA;i++) {
-		if(hbas[i]==pHba) {
-			hbas[i] = NULL;
-		}
-	}
 	p2 = NULL;
 	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
 		if(p1 == pHba) {
@@ -1034,24 +1052,17 @@
 	}
 	hba_count--;
-	up(&adpt_configuration_lock);
+	mutex_unlock(&adpt_configuration_lock);
 
-	iounmap((void*)pHba->base_addr_virt);
+	iounmap(pHba->base_addr_virt);
+	pci_release_regions(pHba->pDev);
 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
-		iounmap((void*)pHba->msg_addr_virt);
-	}
-	if(pHba->hrt) {
-		kfree(pHba->hrt);
-	}
-	if(pHba->lct){
-		kfree(pHba->lct);
-	}
-	if(pHba->status_block) {
-		kfree(pHba->status_block);
-	}
-	if(pHba->reply_pool){
-		kfree(pHba->reply_pool);
+		iounmap(pHba->msg_addr_virt);
 	}
+	kfree(pHba->hrt);
+	kfree(pHba->lct);
+	kfree(pHba->status_block);
+	kfree(pHba->reply_pool);
 
 	for(d = pHba->devices; d ; d = next){
 		next = d->next;
@@ -1067,6 +1078,7 @@
 			}
 		}
 	}
+	pci_dev_put(pHba->pDev);
 	kfree(pHba);
 
 	if(hba_count <= 0){
@@ -1077,12 +1089,7 @@
 
 static int adpt_init(void)
 {
-	int i;
 	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-	for (i = 0; i < DPTI_MAX_HBA; i++) {
-		hbas[i] = NULL;
-	}
 #ifdef REBOOT_NOTIFIER
 	register_reboot_notifier(&adpt_reboot_notifier);
 #endif
@@ -1134,11 +1141,11 @@
 	struct adpt_i2o_post_wait_data *p1, *p2;
 	struct adpt_i2o_post_wait_data *wait_data = kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
-	adpt_wait_queue_t wait;
+	DECLARE_WAITQUEUE(wait, current);
 
-	if(!wait_data){
+	if (!wait_data)
 		return -ENOMEM;
-	}
+
 	/*
 	 * The spin locking is needed to keep anyone from playing
 	 * with the queue pointers and id while we do the same
@@ -1156,12 +1163,7 @@
 	wait_data->wq = &adpt_wq_i2o_post;
 	wait_data->status = -ETIMEDOUT;
 
-	// this code is taken from kernel/sched.c:interruptible_sleep_on_timeout
-	wait.task = current;
-	init_waitqueue_entry(&wait, current);
-	spin_lock_irqsave(&adpt_wq_i2o_post.lock, flags);
-	__add_wait_queue(&adpt_wq_i2o_post, &wait);
-	spin_unlock(&adpt_wq_i2o_post.lock);
+	add_wait_queue(&adpt_wq_i2o_post, &wait);
 
 	msg[2] |= 0x80000000 | ((u32)wait_data->id);
 	timeout *= HZ;
@@ -1179,14 +1181,11 @@
 				// dangerous.
 				status = -ETIME;
 			}
-			schedule_timeout(timeout*HZ);
 		}
 		if(pHba->host)
 			spin_lock_irq(pHba->host->host_lock);
 	}
-	spin_lock_irq(&adpt_wq_i2o_post.lock);
-	__remove_wait_queue(&adpt_wq_i2o_post, &wait);
-	spin_unlock_irqrestore(&adpt_wq_i2o_post.lock, flags);
+	remove_wait_queue(&adpt_wq_i2o_post, &wait);
 
 	if(status == -ETIMEDOUT){
 		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
@@ -1222,7 +1221,7 @@ static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
 {
 
 	u32 m = EMPTY_QUEUE;
-	u32 *msg;
+	u32 __iomem *msg;
 	ulong timeout = jiffies + 30*HZ;
 	do {
 		rmb();
@@ -1234,11 +1233,10 @@
 			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while(m == EMPTY_QUEUE);
 
-	msg = (u32*) (pHba->msg_addr_virt + m);
+	msg = pHba->msg_addr_virt + m;
 	memcpy_toio(msg, data, len);
 	wmb();
@@ -1310,11 +1308,10 @@
 			printk(KERN_WARNING"Timeout waiting for message!\n");
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while (m == EMPTY_QUEUE);
 
-	status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
+	status = kmalloc(4, GFP_KERNEL|ADDR32);
 	if(status == NULL) {
 		adpt_send_nop(pHba, m);
 		printk(KERN_ERR"IOP reset failed - no free memory.\n");
@@ -1343,8 +1340,7 @@
 			return -ETIMEDOUT;
 		}
 		rmb();
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
 
 	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
@@ -1361,8 +1357,7 @@
 				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
 				return -ETIMEDOUT;
 			}
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(1);
+			schedule_timeout_uninterruptible(1);
 		} while (m == EMPTY_QUEUE);
 		// Flush the offset
 		adpt_send_nop(pHba, m);
@@ -1449,14 +1444,14 @@
 			}
 			continue;
 		}
-		d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
+		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
 		if(d==NULL) {
 			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
 			return -ENOMEM;
 		}
 
-		d->controller = (void*)pHba;
+		d->controller = pHba;
 		d->next = NULL;
 
 		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
@@ -1561,7 +1556,7 @@
 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
 {
-	down(&adpt_configuration_lock);
+	mutex_lock(&adpt_configuration_lock);
 	d->controller=pHba;
 	d->owner=NULL;
 	d->next=pHba->devices;
@@ -1572,7 +1567,7 @@
 	pHba->devices=d;
 	*d->dev_name = 0;
 
-	up(&adpt_configuration_lock);
+	mutex_unlock(&adpt_configuration_lock);
 	return 0;
 }
@@ -1587,24 +1582,24 @@ static int adpt_open(struct inode *inode, struct file *file)
 	if (minor >= hba_count) {
 		return -ENXIO;
 	}
-	down(&adpt_configuration_lock);
+	mutex_lock(&adpt_configuration_lock);
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 		if (pHba->unit == minor) {
 			break;	/* found adapter */
 		}
 	}
 	if (pHba == NULL) {
-		up(&adpt_configuration_lock);
+		mutex_unlock(&adpt_configuration_lock);
 		return -ENXIO;
 	}
 
 //	if(pHba->in_use){
-	//	up(&adpt_configuration_lock);
+	//	mutex_unlock(&adpt_configuration_lock);
 //		return -EBUSY;
 //	}
 
 	pHba->in_use = 1;
-	up(&adpt_configuration_lock);
+	mutex_unlock(&adpt_configuration_lock);
 
 	return 0;
 }
@@ -1618,13 +1613,13 @@ static int adpt_close(struct inode *inode, struct file *file)
 	if (minor >= hba_count) {
 		return -ENXIO;
 	}
-	down(&adpt_configuration_lock);
+	mutex_lock(&adpt_configuration_lock);
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 		if (pHba->unit == minor) {
 			break;	/* found adapter */
 		}
 	}
-	up(&adpt_configuration_lock);
+	mutex_unlock(&adpt_configuration_lock);
 	if (pHba == NULL) {
 		return -ENXIO;
 	}
@@ -1827,9 +1822,9 @@ static int adpt_system_info(void __user *buffer)
 	memset(&si, 0, sizeof(si));
 
 	si.osType = OS_LINUX;
-	si.osMajorVersion = (u8) (LINUX_VERSION_CODE >> 16);
-	si.osMinorVersion = (u8) (LINUX_VERSION_CODE >> 8 & 0x0ff);
-	si.osRevision = (u8) (LINUX_VERSION_CODE & 0x0ff);
+	si.osMajorVersion = 0;
+	si.osMinorVersion = 0;
+	si.osRevision = 0;
 	si.busType = SI_PCI_BUS;
 	si.processorFamily = DPTI_sig.dsProcessorFamily;
@@ -1922,22 +1917,19 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
 	if (minor >= DPTI_MAX_HBA){
 		return -ENXIO;
 	}
-	down(&adpt_configuration_lock);
+	mutex_lock(&adpt_configuration_lock);
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 		if (pHba->unit == minor) {
 			break;	/* found adapter */
 		}
 	}
-	up(&adpt_configuration_lock);
+	mutex_unlock(&adpt_configuration_lock);
 	if(pHba == NULL){
 		return -ENXIO;
 	}
 
-	while((volatile u32) pHba->state & DPTI_STATE_RESET ) {
-		set_task_state(current,TASK_UNINTERRUPTIBLE);
-		schedule_timeout(2);
-
-	}
+	while((volatile u32) pHba->state & DPTI_STATE_RESET )
+		schedule_timeout_uninterruptible(2);
 
 	switch (cmd) {
 	// TODO: handle 3 cases
@@ -1997,12 +1989,12 @@
 }
 
 
-static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t adpt_isr(int irq, void *dev_id)
 {
 	struct scsi_cmnd* cmd;
 	adpt_hba* pHba = dev_id;
 	u32 m;
-	ulong reply;
+	void __iomem *reply;
 	u32 status=0;
 	u32 context;
 	ulong flags = 0;
@@ -2027,11 +2019,11 @@
 				goto out;
 			}
 		}
-		reply = (ulong)bus_to_virt(m);
+		reply = bus_to_virt(m);
 
 		if (readl(reply) & MSG_FAIL) {
 			u32 old_m = readl(reply+28);
-			ulong msg;
+			void __iomem *msg;
 			u32 old_context;
 			PDEBUG("%s: Failed message\n",pHba->name);
 			if(old_m >= 0x100000){
@@ -2040,16 +2032,16 @@
 				continue;
 			}
 			// Transaction context is 0 in failed reply frame
-			msg = (ulong)(pHba->msg_addr_virt + old_m);
+			msg = pHba->msg_addr_virt + old_m;
 			old_context = readl(msg+12);
 			writel(old_context, reply+12);
 			adpt_send_nop(pHba, old_m);
 		}
 		context = readl(reply+8);
 		if(context & 0x40000000){ // IOCTL
-			ulong p = (ulong)(readl(reply+12));
-			if( p != 0) {
-				memcpy((void*)p, (void*)reply, REPLY_FRAME_SIZE * 4);
+			void *p = (void *)readl(reply+12);
+			if( p != NULL) {
+				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
 			}
 			// All IOCTLs will also be post wait
 		}
@@ -2220,7 +2212,7 @@
 	 */
 	host->io_port = 0;
 	host->n_io_port = 0;
-				/* see comments in hosts.h */
+				/* see comments in scsi_host.h */
 	host->max_id = 16;
 	host->max_lun = 256;
 	host->max_channel = pHba->top_scsi_channel + 1;
@@ -2233,7 +2225,7 @@
 }
 
 
-static s32 adpt_i2o_to_scsi(ulong reply, struct scsi_cmnd* cmd)
+static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
 {
 	adpt_hba* pHba;
 	u32 hba_status;
@@ -2325,7 +2317,7 @@
 			u32 len = sizeof(cmd->sense_buffer);
 			len = (len > 40) ?  40 : len;
 			// Copy over the sense data
-			memcpy(cmd->sense_buffer, (void*)(reply+28) , len);
+			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
 			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
 			   cmd->sense_buffer[2] == DATA_PROTECT ){
 				/* This is to handle an array failed */
@@ -2433,14 +2425,14 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
 					pDev = pDev->next_lun;
 				}
 				if(!pDev ) { // Something new add it
-					d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
+					d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
 					if(d==NULL) {
 						printk(KERN_CRIT "Out of memory for I2O device data.\n");
 						return -ENOMEM;
 					}
 
-					d->controller = (void*)pHba;
+					d->controller = pHba;
 					d->next = NULL;
 					memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
@@ -2638,7 +2630,7 @@ static int adpt_i2o_online_hba(adpt_hba* pHba)
 
 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
 {
-	u32 *msg;
+	u32 __iomem *msg;
 	ulong timeout = jiffies + 5*HZ;
 
 	while(m == EMPTY_QUEUE){
@@ -2651,10 +2643,9 @@
 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
 			return 2;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
-	msg = (u32*)(pHba->msg_addr_virt + m);
+	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
 	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
 	writel( 0,&msg[2]);
@@ -2668,7 +2659,7 @@
 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
 {
 	u8 *status;
-	u32 *msg = NULL;
+	u32 __iomem *msg = NULL;
 	int i;
 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
 	u32* ptr;
@@ -2686,11 +2677,10 @@
 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while(m == EMPTY_QUEUE);
 
-	msg=(u32 *)(pHba->msg_addr_virt+m);
+	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
 
 	status = kmalloc(4,GFP_KERNEL|ADDR32);
 	if (status==NULL) {
@@ -2725,23 +2715,20 @@
 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while (1);
 
 	// If the command was successful, fill the fifo with our reply
 	// message packets
 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
-		kfree((void*)status);
+		kfree(status);
 		return -2;
 	}
-	kfree((void*)status);
+	kfree(status);
 
-	if(pHba->reply_pool != NULL){
-		kfree(pHba->reply_pool);
-	}
+	kfree(pHba->reply_pool);
 
-	pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
+	pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
 	if(!pHba->reply_pool){
 		printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
 		return -1;
@@ -2775,7 +2762,7 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 {
 	ulong timeout;
 	u32 m;
-	u32 *msg;
+	u32 __iomem *msg;
 	u8 *status_block=NULL;
 	ulong status_block_bus;
@@ -2804,12 +2791,11 @@
 					pHba->name);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while(m==EMPTY_QUEUE);
 
-	msg=(u32*)(pHba->msg_addr_virt+m);
+	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
 
 	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
 	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
@@ -2832,8 +2818,7 @@
 			return -ETIMEDOUT;
 		}
 		rmb();
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
 
 	// Set up our number of outbound and inbound messages
@@ -2957,8 +2942,7 @@
 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
 
-	if(sys_tbl)
-		kfree(sys_tbl);
+	kfree(sys_tbl);
 
 	sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
 	if(!sys_tbl) {
@@ -2987,8 +2971,8 @@
 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
-		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus((void*)pHba->post_port);
-		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus((void*)pHba->post_port)>>32);
+		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
+		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
 		count++;
 	}