int irq;
unsigned long csr_base;
- unsigned char *csr_remap;
+ unsigned char __iomem *csr_remap;
unsigned long csr_len;
#ifdef CONFIG_MM_MAP_MEMORY
unsigned long mem_base;
- unsigned char *mem_remap;
+ unsigned char __iomem *mem_remap;
unsigned long mem_len;
#endif
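
The __iomem annotation lets sparse check that MMIO pointers are never dereferenced directly, only passed to the I/O accessors. For reference, a minimal sketch of how such a pointer is meant to be used (the mapping, register offsets, and names below are made up for illustration, not taken from this driver):

	#include <linux/types.h>
	#include <asm/io.h>

	static unsigned char __iomem *regs;	/* assumed set up by ioremap() */

	static u8 read_status(void)
	{
		return readb(regs + 0x04);	/* 0x04: hypothetical status register */
	}

	static void set_ctrl(u8 val)
	{
		writeb(val, regs + 0x00);	/* 0x00: hypothetical control register */
	}
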
* Whenever IO on the active page completes, the Ready page is activated
* and the ex-Active page is cleaned out and made Ready.
* Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via blk_run_queues().
+ * when mm_unplug_device is called via the unplug_io_fn.
*
* If a request arrives while both pages are full, it is queued, and b_rdev is
* overloaded to record whether it was a read or a write.
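
For reference, a minimal sketch of the Ready/Active swap described above, using hypothetical names rather than the driver's own structures:

	struct mm_page {
		int cnt;			/* descriptors queued on this page */
	};

	static struct mm_page pages[2];
	static int active;			/* index of the Active page */

	static void swap_pages(void)
	{
		active = 1 - active;		/* the Ready page becomes Active */
		pages[1 - active].cnt = 0;	/* ex-Active page is cleaned out
						 * and made Ready again */
	}
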
geo.start = get_start_sect(i->i_bdev);
geo.cylinders = size / (geo.heads * geo.sectors);
- if (copy_to_user((void *) arg, &geo, sizeof(geo)))
+ if (copy_to_user((void __user *) arg, &geo, sizeof(geo)))
return -EFAULT;
return 0;
}
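
The __user annotation plays the same role for userspace pointers: sparse checks that they are only touched through the uaccess helpers. A hedged sketch of the pattern above (geo_to_user is a hypothetical helper, not part of the driver):

	#include <linux/hdreg.h>
	#include <asm/uaccess.h>

	static int geo_to_user(const struct hd_geometry *geo, unsigned long arg)
	{
		/* copy_to_user() returns the number of bytes left uncopied */
		if (copy_to_user((void __user *)arg, geo, sizeof(*geo)))
			return -EFAULT;
		return 0;
	}
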
goto failed_req_mem;
}
- if (!(card->mem_remap = (unsigned char *)ioremap(card->mem_base, card->mem_len))) {
+ if (!(card->mem_remap = ioremap(card->mem_base, card->mem_len))) {
printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
ret = -ENOMEM;
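
No cast is needed because ioremap() returns void __iomem *, which assigns to any __iomem pointer. A sketch of the reserve-then-map pattern the driver follows (map_window and its arguments are placeholders):

	#include <linux/ioport.h>
	#include <asm/io.h>

	static unsigned char __iomem *map_window(unsigned long base,
						 unsigned long len,
						 const char *name)
	{
		unsigned char __iomem *p;

		if (!request_mem_region(base, len, name))
			return NULL;		/* region already claimed */

		p = ioremap(base, len);
		if (!p)
			release_mem_region(base, len);	/* undo on failure */
		return p;
	}
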
card->mm_pages[1].page_dma);
failed_magic:
#ifdef CONFIG_MM_MAP_MEMORY
- iounmap((void *) card->mem_remap);
+ iounmap(card->mem_remap);
failed_remap_mem:
release_mem_region(card->mem_base, card->mem_len);
failed_req_mem:
#endif
- iounmap((void *) card->csr_remap);
+ iounmap(card->csr_remap);
failed_remap_csr:
release_mem_region(card->csr_base, card->csr_len);
failed_req_csr:
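
The labels above unwind in the reverse order of acquisition, so each failure point jumps past the undo steps for resources it never obtained. A hedged sketch of the same goto-ladder shape (the acquire/release helpers are stand-ins, not driver functions):

	#include <linux/errno.h>

	static int acquire_a(void) { return 0; }	/* e.g. request_mem_region() */
	static void release_a(void) { }
	static int acquire_b(void) { return 0; }	/* e.g. ioremap() */

	static int example_setup(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			goto failed_a;
		ret = acquire_b();
		if (ret)
			goto failed_b;
		return 0;			/* success: both resources held */

	failed_b:
		release_a();			/* undo only what succeeded */
	failed_a:
		return ret;
	}
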