#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/vs_memory.h>
#include "uverbs.h"
int i;
list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
- dma_unmap_sg(dev->dma_device, chunk->page_list,
- chunk->nents, DMA_BIDIRECTIONAL);
+ ib_dma_unmap_sg(dev, chunk->page_list,
+ chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) {
if (umem->writable && dirty)
set_page_dirty_lock(chunk->page_list[i].page);
chunk->page_list[i].length = PAGE_SIZE;
}
- chunk->nmap = dma_map_sg(dev->dma_device,
- &chunk->page_list[0],
- chunk->nents,
- DMA_BIDIRECTIONAL);
+ chunk->nmap = ib_dma_map_sg(dev,
+ &chunk->page_list[0],
+ chunk->nents,
+ DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
put_page(chunk->page_list[i].page);
if (ret < 0)
__ib_umem_release(dev, mem, 0);
else
- current->mm->locked_vm = locked;
+ vx_vmlocked_sub(current->mm, current->mm->locked_vm - locked);
up_write(&current->mm->mmap_sem);
free_page((unsigned long) page_list);
__ib_umem_release(dev, umem, 1);
down_write(&current->mm->mmap_sem);
- current->mm->locked_vm -=
- PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+ vx_vmlocked_sub(current->mm,
+ PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT);
up_write(&current->mm->mmap_sem);
}
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
{
- struct ib_umem_account_work *work = work_ptr;
+ struct ib_umem_account_work *work =
+ container_of(_work, struct ib_umem_account_work, work);
down_write(&work->mm->mmap_sem);
- work->mm->locked_vm -= work->diff;
+ vx_vmlocked_sub(work->mm, work->diff);
up_write(&work->mm->mmap_sem);
mmput(work->mm);
kfree(work);
return;
}
- INIT_WORK(&work->work, ib_umem_account, work);
+ INIT_WORK(&work->work, ib_umem_account);
work->mm = mm;
work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;