* properly. For example, if your device can only drive the low 24 bits
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
+ *
+ * FIXME: This should really be a platform-specific issue - we should
+ * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
*/
static inline int dma_supported(struct device *dev, u64 mask)
{
static inline int dma_is_consistent(dma_addr_t handle)
{
- return 0;
+ return !!arch_is_coherent();
}
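
For reference, a minimal sketch of the mask check described in the comment above, assuming a hypothetical foo_probe() for a device that can only drive the low 24 address bits; dma_set_mask() records the mask only if dma_supported() accepts it:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int foo_probe(struct device *dev)
	{
		/*
		 * Hypothetical device that can only drive the low 24
		 * address bits during bus mastering; dma_set_mask()
		 * fails unless dma_supported() accepts the mask.
		 */
		if (dma_set_mask(dev, 0x00ffffff))
			return -EIO;

		return 0;
	}
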
/*
* device-viewed address.
*/
extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
/**
* dma_free_coherent - free memory allocated by dma_alloc_coherent
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle);
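
For context, a minimal sketch of the alloc/free pairing these declarations describe; foo_buf_test() and the PAGE_SIZE buffer are illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static int foo_buf_test(struct device *dev)
	{
		dma_addr_t handle;
		void *cpu;

		/* GFP_KERNEL is illustrative; atomic callers pass GFP_ATOMIC. */
		cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		/* ... hand 'handle' to the device, use 'cpu' from the kernel ... */

		dma_free_coherent(dev, PAGE_SIZE, cpu, handle);
		return 0;
	}
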
+/**
+ * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ * @size: size of memory originally requested in dma_alloc_coherent
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size);
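+
+A sketch of the intended use from a driver's mmap file operation; struct
+foo_state and its fields are hypothetical:
+
+	#include <linux/fs.h>
+	#include <linux/mm.h>
+	#include <linux/dma-mapping.h>
+
+	struct foo_state {		/* hypothetical driver state */
+		struct device *dev;
+		void *cpu_addr;		/* from dma_alloc_coherent() */
+		dma_addr_t handle;
+		size_t size;
+	};
+
+	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
+	{
+		struct foo_state *foo = file->private_data;
+
+		/*
+		 * Per the rule above, foo->cpu_addr must stay allocated
+		 * until this user mapping has been released.
+		 */
+		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
+					 foo->handle, foo->size);
+	}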
+
/**
* dma_alloc_writecombine - allocate writecombining memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* device-viewed address.
*/
extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size);
+
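
The writecombine variants pair the same way as the coherent ones; a sketch, with foo_alloc_fb() hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static void *foo_alloc_fb(struct device *dev, size_t size,
				  dma_addr_t *handle)
	{
		/*
		 * Writecombining suits framebuffer-style buffers the CPU
		 * mostly writes; free via dma_free_writecombine(), map to
		 * user space via dma_mmap_writecombine().
		 */
		return dma_alloc_writecombine(dev, size, handle, GFP_KERNEL);
	}
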
/**
* dma_map_single - map a single buffer for streaming DMA
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
- consistent_sync(cpu_addr, size, dir);
+ if (!arch_is_coherent())
+ consistent_sync(cpu_addr, size, dir);
+
return virt_to_dma(dev, (unsigned long)cpu_addr);
}
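
A minimal streaming-DMA sketch around this helper; foo_tx() is hypothetical, and the buffer must be kmalloc-style memory, not vmalloc or stack:

	#include <linux/dma-mapping.h>

	static void foo_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/* ... start the transfer using 'handle', wait for it ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	}
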
#else
sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
virt = page_address(sg->page) + sg->offset;
- consistent_sync(virt, sg->length, dir);
+
+ if (!arch_is_coherent())
+ consistent_sync(virt, sg->length, dir);
}
return nents;
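
A sketch of scatter-gather mapping; note that unmapping takes the original nents, not the returned count. foo_map_sg() is hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int foo_map_sg(struct device *dev, struct scatterlist *sg,
			      int nents)
	{
		int count;

		count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
		if (count == 0)
			return -ENOMEM;

		/*
		 * Program the device from sg_dma_address()/sg_dma_len()
		 * of the first 'count' entries, which may be fewer than
		 * 'nents'.
		 */

		dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
		return 0;
	}
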
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+ if (!arch_is_coherent())
+ consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+ if (!arch_is_coherent())
+ consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}
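
The sync pair encodes buffer ownership: the CPU may only touch a streaming buffer between the for_cpu and for_device calls. A hypothetical sketch:

	#include <linux/dma-mapping.h>

	static void foo_inspect(struct device *dev, dma_addr_t handle,
				void *cpu_addr, size_t len)
	{
		/* Give ownership of the buffer back to the CPU ... */
		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

		/* ... the CPU may read 'cpu_addr' only in this window ... */

		/* ... then return it to the device before restarting DMA. */
		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	}
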
#else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
- consistent_sync(virt, sg->length, dir);
+ if (!arch_is_coherent())
+ consistent_sync(virt, sg->length, dir);
}
}
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
- consistent_sync(virt, sg->length, dir);
+ if (!arch_is_coherent())
+ consistent_sync(virt, sg->length, dir);
}
}
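
The same ownership window applies per scatterlist; a hypothetical sketch:

	#include <linux/dma-mapping.h>

	static void foo_inspect_sg(struct device *dev, struct scatterlist *sg,
				   int nents)
	{
		dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

		/* ... CPU access to the scatterlist pages goes here ... */

		dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
	}
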
#else