X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Fdma-mapping.h;h=63ca7412a4623984777fcbec72bff0dfa32cacb1;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=925d016dd4b51ecf21fdc8a28d6f9532472d73de;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 925d016dd..63ca7412a 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -21,6 +21,9 @@ extern void consistent_sync(void *kaddr, size_t size, int rw);
  * properly. For example, if your device can only drive the low 24-bits
  * during bus mastering, then you would pass 0x00ffffff as the mask
  * to this function.
+ *
+ * FIXME: This should really be a platform specific issue - we should
+ * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
  */
 static inline int dma_supported(struct device *dev, u64 mask)
 {
@@ -44,7 +47,7 @@ static inline int dma_get_cache_alignment(void)
 
 static inline int dma_is_consistent(dma_addr_t handle)
 {
-	return 0;
+	return !!arch_is_coherent();
 }
 
 /*
@@ -67,7 +70,7 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
  * device-viewed address.
  */
 extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
 
 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -114,7 +117,7 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
  * device-viewed address.
  */
 extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
 
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)
@@ -142,7 +145,9 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction dir)
 {
-	consistent_sync(cpu_addr, size, dir);
+	if (!arch_is_coherent())
+		consistent_sync(cpu_addr, size, dir);
+
 	return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
 #else
@@ -252,7 +257,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
 		virt = page_address(sg->page) + sg->offset;
-		consistent_sync(virt, sg->length, dir);
+
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 
 	return nents;
@@ -307,14 +314,16 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
-	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+	if (!arch_is_coherent())
+		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			   enum dma_data_direction dir)
 {
-	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+	if (!arch_is_coherent())
+		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -344,7 +353,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
-		consistent_sync(virt, sg->length, dir);
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 }
 
@@ -356,7 +366,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
-		consistent_sync(virt, sg->length, dir);
+		if (!arch_is_coherent())
+			consistent_sync(virt, sg->length, dir);
 	}
 }
 #else
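
For context only (not part of the patch): a minimal sketch of how a driver
would exercise the interfaces this diff touches. The function name, device
pointer, and buffer size below are hypothetical; the calls themselves are the
standard Linux DMA API declared in this header.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define MY_BUF_SIZE 4096	/* hypothetical buffer size */

static int my_dma_example(struct device *dev, void *data, size_t len)
{
	dma_addr_t bus_addr, map;
	void *cpu_addr;

	/* Coherent allocation: the gfp argument is now typed gfp_t. */
	cpu_addr = dma_alloc_coherent(dev, MY_BUF_SIZE, &bus_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/*
	 * Streaming mapping: after this patch, the cache maintenance done
	 * by consistent_sync() is skipped when arch_is_coherent() is true.
	 */
	map = dma_map_single(dev, data, len, DMA_TO_DEVICE);

	/* ... start the transfer and wait for completion here ... */

	dma_unmap_single(dev, map, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, MY_BUF_SIZE, cpu_addr, bus_addr);
	return 0;
}

On a fully coherent platform the streaming map/unmap and sync calls become
cheap address translations, which is the point of guarding every
consistent_sync() call site with arch_is_coherent() above.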