X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Fdma-mapping.h;h=63ca7412a4623984777fcbec72bff0dfa32cacb1;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=011c539c7449bc0dc243db2e816dcf55ad0c9225;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 011c539c7..63ca7412a 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -21,6 +21,9 @@ extern void consistent_sync(void *kaddr, size_t size, int rw);
  * properly. For example, if your device can only drive the low 24-bits
  * during bus mastering, then you would pass 0x00ffffff as the mask
  * to this function.
+ *
+ * FIXME: This should really be a platform specific issue - we should
+ * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
  */
 static inline int dma_supported(struct device *dev, u64 mask)
 {
@@ -44,7 +47,7 @@ static inline int dma_get_cache_alignment(void)
 
 static inline int dma_is_consistent(dma_addr_t handle)
 {
-	return 0;
+	return !!arch_is_coherent();
 }
 
 /*
@@ -67,7 +70,7 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
  * device-viewed address.
  */
 extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
 
 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -86,6 +89,22 @@ extern void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		  dma_addr_t handle);
 
+/**
+ * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ * @size: size of memory originally requested in dma_alloc_coherent
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t handle, size_t size);
+
+
 /**
  * dma_alloc_writecombine - allocate writecombining memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -98,11 +117,14 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  * device-viewed address.
  */
 extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
 
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)
 
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t handle, size_t size);
+
 
 /**
  * dma_map_single - map a single buffer for streaming DMA
@@ -123,8 +145,10 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction dir)
 {
-	consistent_sync(cpu_addr, size, dir);
-	return __virt_to_bus((unsigned long)cpu_addr);
+	if (!arch_is_coherent())
+		consistent_sync(cpu_addr, size, dir);
+
+	return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
 #else
 extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
@@ -231,9 +255,11 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt;
 
-		sg->dma_address = page_to_bus(sg->page) + sg->offset;
+		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
 		virt = page_address(sg->page) + sg->offset;
-		consistent_sync(virt, sg->length, dir);
+
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 
 	return nents;
@@ -288,14 +314,16 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
-	consistent_sync((void *)__bus_to_virt(handle), size, dir);
+	if (!arch_is_coherent())
+		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			   enum dma_data_direction dir)
 {
-	consistent_sync((void *)__bus_to_virt(handle), size, dir);
+	if (!arch_is_coherent())
+		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -325,7 +353,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 
-		consistent_sync(virt, sg->length, dir);
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 }
 
@@ -337,7 +366,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 
-		consistent_sync(virt, sg->length, dir);
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 }
 #else
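
For reference, a minimal usage sketch of the interfaces this patch touches; it is not part of the patch, and the foo_* identifiers and FOO_BUF_SIZE are hypothetical. It shows a coherent allocation (note the gfp_t argument introduced above), a streaming dma_map_single()/dma_unmap_single() pair, and dma_mmap_coherent() exporting the coherent buffer from an mmap() file operation.

/* Hypothetical driver fragment -- a sketch, not part of the patch. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

#define FOO_BUF_SIZE	PAGE_SIZE	/* assumed buffer size */

static struct device *foo_dev;		/* saved at probe time */
static void *foo_cpu_addr;		/* kernel (CPU-view) address */
static dma_addr_t foo_handle;		/* device-view address */

static int foo_setup(struct device *dev)
{
	foo_dev = dev;

	/* Coherent allocation; the last argument is now a gfp_t. */
	foo_cpu_addr = dma_alloc_coherent(dev, FOO_BUF_SIZE,
					  &foo_handle, GFP_KERNEL);
	if (!foo_cpu_addr)
		return -ENOMEM;
	return 0;
}

/* Streaming DMA: map before the device reads, unmap when done. */
static dma_addr_t foo_start_tx(struct device *dev, void *buf, size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}

static void foo_tx_done(struct device *dev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}

/*
 * mmap() file operation exporting the coherent buffer to user space.
 * Per the dma_mmap_coherent documentation, the buffer must not be
 * freed while the user space mapping exists.
 */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	return dma_mmap_coherent(foo_dev, vma, foo_cpu_addr,
				 foo_handle, FOO_BUF_SIZE);
}

On a coherent platform (arch_is_coherent() true) the streaming map and sync paths above reduce to plain address translation, which is exactly what the arch_is_coherent() tests added by this patch implement.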