fedora core 6 1.2949 + vserver 2.2.0
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147d..4d65a4b 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -36,6 +36,7 @@
 
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/vs_memory.h>
 
 #include "uverbs.h"
 
@@ -52,8 +53,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
        int i;
 
        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-               dma_unmap_sg(dev->dma_device, chunk->page_list,
-                            chunk->nents, DMA_BIDIRECTIONAL);
+               ib_dma_unmap_sg(dev, chunk->page_list,
+                               chunk->nents, DMA_BIDIRECTIONAL);
                for (i = 0; i < chunk->nents; ++i) {
                        if (umem->writable && dirty)
                                set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +137,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
                                chunk->page_list[i].length = PAGE_SIZE;
                        }
 
-                       chunk->nmap = dma_map_sg(dev->dma_device,
-                                                &chunk->page_list[0],
-                                                chunk->nents,
-                                                DMA_BIDIRECTIONAL);
+                       chunk->nmap = ib_dma_map_sg(dev,
+                                                   &chunk->page_list[0],
+                                                   chunk->nents,
+                                                   DMA_BIDIRECTIONAL);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(chunk->page_list[i].page);
@@ -161,7 +162,7 @@ out:
        if (ret < 0)
                __ib_umem_release(dev, mem, 0);
        else
-               current->mm->locked_vm = locked;
+               vx_vmlocked_sub(current->mm, current->mm->locked_vm - locked);
 
        up_write(&current->mm->mmap_sem);
        free_page((unsigned long) page_list);
@@ -174,17 +175,18 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
        __ib_umem_release(dev, umem, 1);
 
        down_write(&current->mm->mmap_sem);
-       current->mm->locked_vm -=
-               PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+       vx_vmlocked_sub(current->mm,
+               PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT);
        up_write(&current->mm->mmap_sem);
 }
 
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
 {
-       struct ib_umem_account_work *work = work_ptr;
+       struct ib_umem_account_work *work =
+               container_of(_work, struct ib_umem_account_work, work);
 
        down_write(&work->mm->mmap_sem);
-       work->mm->locked_vm -= work->diff;
+       vx_vmlocked_sub(work->mm, work->diff);
        up_write(&work->mm->mmap_sem);
        mmput(work->mm);
        kfree(work);
@@ -216,7 +218,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
                return;
        }
 
-       INIT_WORK(&work->work, ib_umem_account, work);
+       INIT_WORK(&work->work, ib_umem_account);
        work->mm   = mm;
        work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
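
Note on the workqueue hunks: from kernel 2.6.20 onward a work function receives the struct work_struct pointer itself and recovers its container with container_of(), and INIT_WORK() drops its old data argument, which is what the ib_umem_account() and INIT_WORK() changes above track. Below is a minimal sketch of that pattern, with a hypothetical deferred_account structure standing in for ib_umem_account_work (whose real definition lives in uverbs.h and is not shown in this diff):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    /* Hypothetical container; ib_umem_account_work embeds its
     * work_struct the same way. */
    struct deferred_account {
            struct work_struct work;
            unsigned long      diff;
    };

    static void deferred_account_fn(struct work_struct *_work)
    {
            /* The callback only gets the embedded work_struct;
             * container_of() recovers the surrounding structure,
             * as ib_umem_account() now does. */
            struct deferred_account *da =
                    container_of(_work, struct deferred_account, work);

            /* ... consume da->diff here ... */
            kfree(da);
    }

    static int queue_deferred_account(unsigned long diff)
    {
            struct deferred_account *da = kmalloc(sizeof *da, GFP_KERNEL);

            if (!da)
                    return -ENOMEM;

            /* Post-2.6.20 INIT_WORK() takes only the work item and the
             * callback; the old third (data) argument is gone. */
            INIT_WORK(&da->work, deferred_account_fn);
            da->diff = diff;
            schedule_work(&da->work);
            return 0;
    }

The ib_dma_map_sg()/ib_dma_unmap_sg() hunks are the matching conversion to the ib_dma_* wrappers introduced in the same kernel release, which dispatch through the device's dma_ops when set and otherwise fall back to dma_map_sg()/dma_unmap_sg() on dev->dma_device. The vx_vmlocked_sub() hunks are the vserver-specific part of the patch, replacing direct arithmetic on mm->locked_vm with the vserver accounting helper.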