X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Farm%2Fmm%2Fconsistent.c;h=915cee5c1a21b9a8f57735ca051f4f3c85f3a2e8;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=f9b8fe2c75942068786ed8b1439c714ea77ffa03;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index f9b8fe2c7..915cee5c1 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -1,7 +1,7 @@
 /*
  *  linux/arch/arm/mm/consistent.c
  *
- *  Copyright (C) 2000-2002 Russell King
+ *  Copyright (C) 2000-2004 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -30,7 +30,7 @@
  * This is the page table (2MB) covering uncached, DMA consistent allocations
  */
 static pte_t *consistent_pte;
-static spinlock_t consistent_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(consistent_lock);
 
 /*
  * VM region handling support.
@@ -65,6 +65,7 @@ struct vm_region {
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
+	struct page		*vm_pages;
 };
 
 static struct vm_region consistent_head = {
@@ -138,7 +139,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 	struct page *page;
 	struct vm_region *c;
 	unsigned long order;
-	u64 mask = 0x00ffffff, limit;	/* ISA default */
+	u64 mask = ISA_DMA_THRESHOLD, limit;
 
 	if (!consistent_pte) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -148,19 +149,34 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
 		if (mask == 0) {
 			dev_warn(dev, "coherent DMA mask is unset\n");
-			return NULL;
+			goto no_page;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			goto no_page;
 		}
 	}
 
+	/*
+	 * Sanity check the allocation size.
+	 */
 	size = PAGE_ALIGN(size);
 	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
-		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
-		       size, mask);
-		*handle = ~0;
-		return NULL;
+	if ((limit && size >= limit) ||
+	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+		printk(KERN_WARNING "coherent allocation too big "
+		       "(requested %#x mask %#llx)\n", size, mask);
+		goto no_page;
 	}
 
 	order = get_order(size);
@@ -191,6 +207,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
 		struct page *end = page + (1 << order);
 
+		c->vm_pages = page;
+
 		/*
 		 * Set the "dma handle"
 		 */
@@ -200,6 +218,9 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 			BUG_ON(!pte_none(*pte));
 
 			set_page_count(page, 1);
+			/*
+			 * x86 does not mark the pages reserved...
+			 */
 			SetPageReserved(page);
 			set_pte(pte, mk_pte(page, prot));
 			page++;
@@ -221,6 +242,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
 	if (page)
 		__free_pages(page, order);
  no_page:
+	*handle = ~0;
 	return NULL;
 }
 
@@ -248,6 +270,53 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
 }
 EXPORT_SYMBOL(dma_alloc_writecombine);
 
+static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	unsigned long flags, user_size, kern_size;
+	struct vm_region *c;
+	int ret = -ENXIO;
+
+	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+	spin_lock_irqsave(&consistent_lock, flags);
+	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	spin_unlock_irqrestore(&consistent_lock, flags);
+
+	if (c) {
+		unsigned long off = vma->vm_pgoff;
+
+		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
+
+		if (off < kern_size &&
+		    user_size <= (kern_size - off)) {
+			vma->vm_flags |= VM_RESERVED;
+			ret = remap_pfn_range(vma, vma->vm_start,
+					      page_to_pfn(c->vm_pages) + off,
+					      user_size << PAGE_SHIFT,
+					      vma->vm_page_prot);
+		}
+	}
+
+	return ret;
+}
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL(dma_mmap_coherent);
+
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL(dma_mmap_writecombine);
+
 /*
  * free a page as defined by the above mapping.
  */
@@ -284,6 +353,10 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
+
+			/*
+			 * x86 does not mark the pages reserved...
+			 */
 			ClearPageReserved(page);
 			__free_page(page);
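
Note (illustrative, not part of the patch): the hunks above export two new helpers, dma_mmap_coherent() and dma_mmap_writecombine(), for mapping a coherent DMA buffer into user space. A minimal sketch of how a driver's mmap file operation might call dma_mmap_coherent() follows; the foo_dev structure, its fields and foo_mmap() are hypothetical names, and the buffer is assumed to have been allocated earlier with dma_alloc_coherent().

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

struct foo_dev {
	struct device	*dev;		/* device the buffer was allocated for */
	void		*buf;		/* CPU address from dma_alloc_coherent() */
	dma_addr_t	buf_dma;	/* matching bus address */
	size_t		buf_size;	/* allocation size, page aligned */
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *foo = file->private_data;

	/*
	 * dma_mmap_coherent() looks up the consistent region backing buf,
	 * checks that the requested window fits inside it, and remaps the
	 * pages uncached into the calling process's address space.
	 */
	return dma_mmap_coherent(foo->dev, vma, foo->buf,
				 foo->buf_dma, foo->buf_size);
}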