pte_t *pte;
int err = -ENOMEM;
- spin_lock(&init_mm.page_table_lock);
-
/* Use upper 10 bits of VA to index the first-level map */
pge = pgd_offset_k(va);
pue = pud_offset(pge, va);
pme = pmd_offset(pue, va);
/* Use middle 10 bits of VA to index the second-level map */
- pte = pte_alloc_kernel(&init_mm, pme, va);
+ pte = pte_alloc_kernel(pme, va);
if (pte != 0) {
err = 0;
set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
}
- spin_unlock(&init_mm.page_table_lock);
return err;
}
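
The dropped spin_lock()/spin_unlock() pair is safe because pte_alloc_kernel() now takes init_mm.page_table_lock itself, which is also why the &init_mm argument disappears from the call. A minimal sketch of that internal locking, paraphrased from the generic mm code rather than quoted from it:

/*
 * Sketch of the post-change pte_alloc_kernel(): a paraphrase of the
 * generic mm implementation, not the verbatim kernel source.  The
 * caller no longer holds init_mm.page_table_lock; the lock is taken
 * here, only around the pmd-populate race window.
 */
pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	if (pmd_none(*pmd)) {
		pte_t *new = pte_alloc_one_kernel(&init_mm, address);
		if (!new)
			return NULL;

		spin_lock(&init_mm.page_table_lock);
		if (pmd_present(*pmd))	/* raced: another thread populated it */
			pte_free_kernel(new);
		else
			pmd_populate_kernel(&init_mm, pmd, new);
		spin_unlock(&init_mm.page_table_lock);
	}
	return pte_offset_kernel(pmd, address);
}
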
* portions of the kernel with single large page TLB entries, and
* still get unique uncached pages for consistent DMA.
*/
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
struct vm_struct *area;
unsigned long page, va, pa;
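
Changing the gfp argument from plain int to gfp_t gives the allocation flags a __bitwise type that sparse can check. A hypothetical caller after the change (the alloc_ring() name and the one-page size are invented for illustration):

/*
 * Hypothetical caller: one page of coherent memory for a descriptor
 * ring.  Passing a bare int as the flags, or swapping the flags and
 * size arguments, would now draw a sparse warning under make C=1.
 */
static void *alloc_ring(dma_addr_t *handle)
{
	return consistent_alloc(GFP_KERNEL | GFP_DMA, PAGE_SIZE, handle);
}
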
*/
if (order > 0) {
struct page *rpage = virt_to_page(page);
-
- for (i = 1; i < (1 << order); i++)
- set_page_count(rpage + i, 1);
+ split_page(rpage, order);
}
err = 0;
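
split_page() replaces the open-coded set_page_count() loop: it turns one order-N allocation into 1 << N independent order-0 pages, each with its own reference count, so the pages can later be released one at a time. A sketch of the pattern, assuming a plain __get_free_pages() allocation (no __GFP_COMP):

/*
 * Sketch: allocate order-2 (four pages), split, then free the tail
 * pages individually.  Freeing constituent pages one by one is only
 * legal after split_page(), since only then does each page carry its
 * own refcount.
 */
unsigned long addr = __get_free_pages(GFP_KERNEL, 2);
if (addr) {
	struct page *first = virt_to_page(addr);
	int i;

	split_page(first, 2);
	for (i = 1; i < (1 << 2); i++)
		__free_page(first + i);	/* keep page 0, drop pages 1..3 */
}
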