X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fppc%2Fkernel%2Fdma-mapping.c;h=61465ec88bc7268b0325001bac308c28d1db8588;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=e0c631cf96b039429a24c609284c9531bef076c8;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
index e0c631cf9..61465ec88 100644
--- a/arch/ppc/kernel/dma-mapping.c
+++ b/arch/ppc/kernel/dma-mapping.c
@@ -115,7 +115,7 @@ static struct vm_region consistent_head = {
 };
 
 static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, int gfp)
+vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
@@ -173,7 +173,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
  * virtual and bus address for that space.
  */
 void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
+__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
 	struct vm_region *c;
@@ -223,6 +223,8 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
 		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
 		struct page *end = page + (1 << order);
 
+		split_page(page, order);
+
 		/*
 		 * Set the "dma handle"
 		 */
@@ -231,7 +233,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
 		do {
 			BUG_ON(!pte_none(*pte));
 
-			set_page_count(page, 1);
 			SetPageReserved(page);
 			set_pte_at(&init_mm, vaddr,
 				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
@@ -244,7 +245,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
 		 * Free the otherwise unused pages.
 		 */
 		while (page < end) {
-			set_page_count(page, 1);
 			__free_page(page);
 			page++;
 		}
@@ -335,8 +335,6 @@ static int __init dma_alloc_init(void)
 	pte_t *pte;
 	int ret = 0;
 
-	spin_lock(&init_mm.page_table_lock);
-
 	do {
 		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
 		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
@@ -347,7 +345,7 @@ static int __init dma_alloc_init(void)
 		}
 		WARN_ON(!pmd_none(*pmd));
-		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
 		if (!pte) {
 			printk(KERN_ERR "%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
 			break;
@@ -357,8 +355,6 @@ static int __init dma_alloc_init(void)
 
 		consistent_pte = pte;
 	} while (0);
 
-	spin_unlock(&init_mm.page_table_lock);
-
 	return ret;
 }
@@ -393,7 +389,7 @@ EXPORT_SYMBOL(__dma_sync);
  * __dma_sync_page() implementation for systems using highmem.
  * In this case, each page of a buffer must be kmapped/kunmapped
  * in order to have a virtual address for __dma_sync(). This must
- * not sleep so kmap_atmomic()/kunmap_atomic() are used.
+ * not sleep so kmap_atomic()/kunmap_atomic() are used.
  *
  * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
@@ -401,10 +397,10 @@
 static inline void __dma_sync_page_highmem(struct page *page,
 		unsigned long offset, size_t size, int direction)
 {
-	size_t seg_size = min((size_t)PAGE_SIZE, size) - offset;
+	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
 	size_t cur_size = seg_size;
 	unsigned long flags, start, seg_offset = offset;
-	int nr_segs = PAGE_ALIGN(size + (PAGE_SIZE - offset))/PAGE_SIZE;
+	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
 	int seg_nr = 0;
 
 	local_irq_save(flags);
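
Background on the first two hunks: they are part of the tree-wide int -> gfp_t conversion, which gave allocation flags a dedicated sparse-checked type so that 'make C=1' catches integer/flag mixups at the prototype boundary. Roughly how the type works (the real typedef lives in include/linux/types.h; this fragment is illustrative, not a quote of the patched tree):

#ifdef __CHECKER__			/* defined while sparse runs */
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise
#endif

typedef unsigned int __bitwise gfp_t;

With the old 'int gfp' prototypes a caller could silently pass any integer; with gfp_t, sparse warns on every implicit conversion, which is why vm_region_alloc() and __dma_alloc_coherent() have to change in lockstep with their callers.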
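
The split_page() addition and the two set_page_count() removals belong together: split_page(page, order) gives each of the 2^order constituent pages of a higher-order allocation its own reference count, so the open-coded set_page_count(page, 1) calls in both the mapping loop and the tail-freeing loop become redundant. A minimal sketch of the resulting allocate/split/trim pattern, assuming the 2.6.16-era page allocator interfaces; alloc_exact() is a made-up name for illustration, not a function in the patched file:

/* Kernel-style fragment, not a standalone program. */
static void *alloc_exact(size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page = alloc_pages(gfp, order);
	struct page *p, *end;

	if (!page)
		return NULL;

	/* Give every constituent page its own refcount of 1. */
	split_page(page, order);

	/* Hand back the pages beyond what 'size' actually needs. */
	end = page + (1 << order);
	for (p = page + (PAGE_ALIGN(size) >> PAGE_SHIFT); p < end; p++)
		__free_page(p);

	return page_address(page);
}

This is essentially the pattern the page allocator later exposed generically as alloc_pages_exact().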
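
The dma_alloc_init() hunks track an upstream locking rework: callers no longer take init_mm.page_table_lock around kernel page-table population, and pte_alloc_kernel() dropped its mm argument, taking that lock internally for the populate step instead. A sketch of the resulting call pattern, assuming the kernel interfaces of that era; kernel_pte_for() is a hypothetical helper, and the pud level shown is folded away on 2-level ppc, which is why the patched file goes pgd -> pmd directly:

/* Kernel-style fragment, not a standalone program. */
static pte_t *kernel_pte_for(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud = pud_alloc(&init_mm, pgd, addr);
	pmd_t *pmd = pud ? pmd_alloc(&init_mm, pud, addr) : NULL;

	/* No spin_lock(&init_mm.page_table_lock) here any more. */
	return pmd ? pte_alloc_kernel(pmd, addr) : NULL;
}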
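
The last hunk fixes the segmentation arithmetic in __dma_sync_page_highmem(). The old seg_size ignored the fact that offset only eats into the first page, and underflows (seg_size is a size_t) for a small buffer at a non-zero offset; the old nr_segs also rounds the page count up too aggressively. The program below, a standalone userspace sketch with PAGE_SIZE hard-coded to 4096 and min() open-coded purely to compare the two formulas, shows the difference:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define min(a, b)	((a) < (b) ? (a) : (b))

static void compare(unsigned long offset, size_t size)
{
	/* formulas removed by the hunk */
	size_t old_seg  = min((size_t)PAGE_SIZE, size) - offset;
	int    old_segs = PAGE_ALIGN(size + (PAGE_SIZE - offset)) / PAGE_SIZE;
	/* formulas added by the hunk */
	size_t new_seg  = min((size_t)(PAGE_SIZE - offset), size);
	int    new_segs = 1 + ((size - new_seg) + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("offset=%lu size=%zu  old: seg=%zu nr=%d  new: seg=%zu nr=%d\n",
	       offset, size, old_seg, old_segs, new_seg, new_segs);
}

int main(void)
{
	compare(0, 4096);	/* old counts 2 segments for one full page */
	compare(512, 256);	/* old seg_size wraps below zero */
	compare(100, 8000);	/* old counts 3 segments where 2 suffice */
	return 0;
}

One full page comes out as old nr_segs = 2 versus new nr_segs = 1; 256 bytes at offset 512 makes the old seg_size wrap to a huge value; 8000 bytes at offset 100 gives old nr_segs = 3 where 2 segments suffice.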