diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 915cee5c1..6a9c362fe 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -18,18 +18,28 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 
+#include <asm/memory.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
 
-#define CONSISTENT_BASE	(0xffc00000)
 #define CONSISTENT_END	(0xffe00000)
+#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
 /*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte;
+static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
 static DEFINE_SPINLOCK(consistent_lock);
 
 /*
@@ -66,6 +76,7 @@ struct vm_region {
 	unsigned long		vm_start;
 	unsigned long		vm_end;
 	struct page		*vm_pages;
+	int			vm_active;
 };
 
 static struct vm_region consistent_head = {
@@ -75,7 +86,7 @@ static struct vm_region consistent_head = {
 };
 
 static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, int gfp)
+vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
@@ -104,6 +115,7 @@ vm_region_alloc(struct vm_region *head, size_t size, int gfp)
 	list_add_tail(&new->vm_list, &c->vm_list);
 	new->vm_start = addr;
 	new->vm_end = addr + size;
+	new->vm_active = 1;
 
 	spin_unlock_irqrestore(&consistent_lock, flags);
 	return new;
@@ -120,7 +132,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
 	struct vm_region *c;
 
 	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_start == addr)
+		if (c->vm_active && c->vm_start == addr)
 			goto out;
 	}
 	c = NULL;
@@ -133,7 +145,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
 #endif
 
 static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
 {
 	struct page *page;
@@ -141,7 +153,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 	unsigned long order;
 	u64 mask = ISA_DMA_THRESHOLD, limit;
 
-	if (!consistent_pte) {
+	if (!consistent_pte[0]) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
 		dump_stack();
 		return NULL;
@@ -204,11 +216,16 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 	c = vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
-		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+		pte_t *pte;
 		struct page *end = page + (1 << order);
+		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
+		pte = consistent_pte[idx] + off;
 		c->vm_pages = page;
 
+		split_page(page, order);
+
 		/*
		 * Set the "dma handle"
		 */
@@ -217,21 +234,24 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 		do {
 			BUG_ON(!pte_none(*pte));
 
-			set_page_count(page, 1);
 			/*
			 * x86 does not mark the pages reserved...
			 */
 			SetPageReserved(page);
-			set_pte(pte, mk_pte(page, prot));
+			set_pte_ext(pte, mk_pte(page, prot), 0);
 			page++;
 			pte++;
+			off++;
+			if (off >= PTRS_PER_PTE) {
+				off = 0;
+				pte = consistent_pte[++idx];
+			}
 		} while (size -= PAGE_SIZE);
 
 		/*
		 * Free the otherwise unused pages.
		 */
 		while (page < end) {
-			set_page_count(page, 1);
 			__free_page(page);
 			page++;
 		}
@@ -251,8 +271,19 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
  * virtual and bus address for that space.
  */
 void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
+	if (arch_is_coherent()) {
+		void *virt;
+
+		virt = kmalloc(size, gfp);
+		if (!virt)
+			return NULL;
+		*handle = virt_to_dma(dev, virt);
+
+		return virt;
+	}
+
 	return __dma_alloc(dev, size, handle, gfp,
 			   pgprot_noncached(pgprot_kernel));
 }
@@ -263,7 +294,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
  * dma_alloc_coherent above.
  */
 void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	return __dma_alloc(dev, size, handle, gfp,
 			   pgprot_writecombine(pgprot_kernel));
@@ -319,21 +350,33 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 
 /*
  * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
 	struct vm_region *c;
-	unsigned long flags;
+	unsigned long flags, addr;
 	pte_t *ptep;
+	int idx;
+	u32 off;
+
+	WARN_ON(irqs_disabled());
+
+	if (arch_is_coherent()) {
+		kfree(cpu_addr);
+		return;
+	}
 
 	size = PAGE_ALIGN(size);
 
 	spin_lock_irqsave(&consistent_lock, flags);
-
 	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
+	c->vm_active = 0;
+	spin_unlock_irqrestore(&consistent_lock, flags);
+
 	if ((c->vm_end - c->vm_start) != size) {
 		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
@@ -341,12 +384,21 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 		size = c->vm_end - c->vm_start;
 	}
 
-	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+	idx = CONSISTENT_PTE_INDEX(c->vm_start);
+	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+	ptep = consistent_pte[idx] + off;
+	addr = c->vm_start;
 	do {
-		pte_t pte = ptep_get_and_clear(ptep);
+		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
 		unsigned long pfn;
 
 		ptep++;
+		addr += PAGE_SIZE;
+		off++;
+		if (off >= PTRS_PER_PTE) {
+			off = 0;
+			ptep = consistent_pte[++idx];
+		}
 
 		if (!pte_none(pte) && pte_present(pte)) {
 			pfn = pte_pfn(pte);
@@ -370,8 +422,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
 
+	spin_lock_irqsave(&consistent_lock, flags);
 	list_del(&c->vm_list);
-
 	spin_unlock_irqrestore(&consistent_lock, flags);
 
 	kfree(c);
@@ -393,13 +445,12 @@ static int __init consistent_init(void)
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int ret = 0;
-
-	spin_lock(&init_mm.page_table_lock);
+	int ret = 0, i = 0;
+	u32 base = CONSISTENT_BASE;
 
 	do {
-		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+		pgd = pgd_offset(&init_mm, base);
+		pmd = pmd_alloc(&init_mm, pgd, base);
 		if (!pmd) {
 			printk(KERN_ERR "%s: no pmd tables\n", __func__);
 			ret = -ENOMEM;
@@ -407,17 +458,16 @@ static int __init consistent_init(void)
 		}
 		WARN_ON(!pmd_none(*pmd));
 
-		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+		pte = pte_alloc_kernel(pmd, base);
 		if (!pte) {
 			printk(KERN_ERR "%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
 
-		consistent_pte = pte;
-	} while (0);
-
-	spin_unlock(&init_mm.page_table_lock);
+		consistent_pte[i++] = pte;
+		base += (1 << PGDIR_SHIFT);
+	} while (base < CONSISTENT_END);
 
 	return ret;
 }
@@ -426,6 +476,9 @@ core_initcall(consistent_init);
 
 /*
  * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 void consistent_sync(void *vaddr, size_t size, int direction)
 {
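
For context, this is how a driver consumes the API this file implements, as it stands after the patch (gfp_t-typed allocation flag; a free path that must run with IRQs enabled). The sketch below is illustrative only and not part of the commit: struct example_buf and the example_* functions are hypothetical names, and only dma_alloc_coherent()/dma_free_coherent() with their post-patch signatures are taken from the diff above.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical per-device DMA buffer, for illustration only. */
struct example_buf {
	void		*cpu;	/* kernel virtual address (uncached unless arch_is_coherent()) */
	dma_addr_t	dma;	/* bus address to program into the device */
	size_t		size;
};

static int example_buf_alloc(struct device *dev, struct example_buf *buf)
{
	buf->size = PAGE_SIZE;	/* arbitrary example size */

	/* gfp is typed gfp_t after this patch; GFP_KERNEL may sleep. */
	buf->cpu = dma_alloc_coherent(dev, buf->size, &buf->dma, GFP_KERNEL);
	if (!buf->cpu)
		return -ENOMEM;

	return 0;
}

static void example_buf_free(struct device *dev, struct example_buf *buf)
{
	/*
	 * Per the comment added in the patch, dma_free_coherent() must not
	 * be called with IRQs disabled (it now WARN_ONs if they are), so
	 * this must never run from interrupt context.
	 */
	dma_free_coherent(dev, buf->size, buf->cpu, buf->dma);
}

CPU accesses through buf->cpu need no explicit cache maintenance; that is the point of mapping the pages uncached in the consistent region (or, on arch_is_coherent() systems, of returning ordinary kmalloc() memory, which is already coherent).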