X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Finfiniband%2Fhw%2Fipath%2Fipath_user_pages.c;h=311ec3bea21d89f4f565e9fa27468db6a65719dc;hb=refs%2Fheads%2Fvserver;hp=f4bf9c7e5b174f0bb6f1e4fac91b721de733b39d;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index f4bf9c7e5..311ec3bea 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -91,6 +91,62 @@ bail:
 	return ret;
 }
 
+/**
+ * ipath_map_page - a safety wrapper around pci_map_page()
+ *
+ * A dma_addr of all 0's is interpreted by the chip as "disabled".
+ * Unfortunately, it can also be a valid dma_addr returned on some
+ * architectures.
+ *
+ * The powerpc iommu assigns dma_addrs in ascending order, so we don't
+ * have to bother with retries or mapping a dummy page to insure we
+ * don't just get the same mapping again.
+ *
+ * I'm sure we won't be so lucky with other iommu's, so FIXME.
+ */
+dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
+	unsigned long offset, size_t size, int direction)
+{
+	dma_addr_t phys;
+
+	phys = pci_map_page(hwdev, page, offset, size, direction);
+
+	if (phys == 0) {
+		pci_unmap_page(hwdev, phys, size, direction);
+		phys = pci_map_page(hwdev, page, offset, size, direction);
+		/*
+		 * FIXME: If we get 0 again, we should keep this page,
+		 * map another, then free the 0 page.
+		 */
+	}
+
+	return phys;
+}
+
+/**
+ * ipath_map_single - a safety wrapper around pci_map_single()
+ *
+ * Same idea as ipath_map_page().
+ */
+dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
+	int direction)
+{
+	dma_addr_t phys;
+
+	phys = pci_map_single(hwdev, ptr, size, direction);
+
+	if (phys == 0) {
+		pci_unmap_single(hwdev, phys, size, direction);
+		phys = pci_map_single(hwdev, ptr, size, direction);
+		/*
+		 * FIXME: If we get 0 again, we should keep this page,
+		 * map another, then free the 0 page.
+		 */
+	}
+
+	return phys;
+}
+
 /**
  * ipath_get_user_pages - lock user pages into memory
  * @start_page: the start page
@@ -160,9 +216,10 @@ struct ipath_user_pages_work {
 	unsigned long num_pages;
 };
 
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
 {
-	struct ipath_user_pages_work *work = ptr;
+	struct ipath_user_pages_work *work =
+		container_of(_work, struct ipath_user_pages_work, work);
 
 	down_write(&work->mm->mmap_sem);
 	vx_vmlocked_sub(work->mm, work->num_pages);
@@ -188,7 +245,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 
 		goto bail;
 
-	INIT_WORK(&work->work, user_pages_account, work);
+	INIT_WORK(&work->work, user_pages_account);
 	work->mm = mm;
 	work->num_pages = num_pages;
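
The first hunk exists because the ipath chip treats an all-zero DMA address as "disabled", so a legitimate bus address of 0 from pci_map_page()/pci_map_single() would silently disable the buffer it describes. Below is a minimal caller sketch, not part of this patch: the helper name example_map_rcv_page and the caller-supplied struct pci_dev are assumptions for illustration; only ipath_map_page() and the standard PCI DMA API come from the code above.

/*
 * Hypothetical usage sketch: map a receive buffer page through
 * ipath_map_page() instead of pci_map_page() so a returned bus
 * address of 0 can be treated as "mapping failed" rather than as
 * the chip's "disabled" sentinel.
 */
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* Prototype as added by the patch (normally in a driver header). */
dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
	unsigned long offset, size_t size, int direction);

static int example_map_rcv_page(struct pci_dev *pdev, struct page *page,
	dma_addr_t *busaddr)
{
	/* The chip DMAs into this page, hence PCI_DMA_FROMDEVICE. */
	*busaddr = ipath_map_page(pdev, page, 0, PAGE_SIZE,
				  PCI_DMA_FROMDEVICE);

	/*
	 * After the wrapper's remap attempt, 0 really does mean the
	 * mapping failed; a raw pci_map_page() result could not be
	 * checked this way on every architecture.
	 */
	return *busaddr ? 0 : -ENOMEM;
}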
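
The remaining hunks adapt user_pages_account() to the 2.6.20 workqueue API, where the handler receives the embedded struct work_struct itself and recovers its container with container_of() instead of taking a void * context argument, and INIT_WORK() loses its third "data" parameter. A generic sketch of that pattern follows; the names my_deferred_work, my_handler, and my_defer are illustrative and do not appear in this file.

/*
 * Generic sketch of the post-2.6.20 deferred-work pattern used above
 * (names are illustrative, not from ipath_user_pages.c).
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_deferred_work {
	struct work_struct work;	/* must be embedded in the container */
	unsigned long num_pages;	/* example payload */
};

static void my_handler(struct work_struct *_work)
{
	/* Recover the containing structure from the embedded member. */
	struct my_deferred_work *w =
		container_of(_work, struct my_deferred_work, work);

	/* ... consume w->num_pages here ... */
	kfree(w);
}

static int my_defer(unsigned long num_pages)
{
	struct my_deferred_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;

	/* The old INIT_WORK(work, func, data) third argument is gone. */
	INIT_WORK(&w->work, my_handler);
	w->num_pages = num_pages;
	schedule_work(&w->work);

	return 0;
}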