consistent_free(vaddr, size);
}
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
consistent_sync(vaddr, size, (int)dir);
if (dev->bus == &pci_bus_type)
return virt_to_bus(ptr);
#endif
- dma_cache_sync(ptr, size, dir);
+ dma_cache_sync(dev, ptr, size, dir);
return virt_to_bus(ptr);
}
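
The only caller-visible change is the new struct device argument; the buffer, length and direction pass through unchanged. A minimal sketch of an updated call site (the device, buffer and length names are assumed for illustration, not taken from this patch):

/* Hypothetical call site, shown only to illustrate the new signature. */
#include <linux/dma-mapping.h>

static void example_flush(struct device *dev, void *buf, size_t len)
{
	/* previously: dma_cache_sync(buf, len, DMA_TO_DEVICE); */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}
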
for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
if (dev->bus == &pci_bus_type)
return;
#endif
- dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
}
static inline void dma_sync_single_range(struct device *dev,
if (dev->bus == &pci_bus_type)
return;
#endif
- dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+ dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
}
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
}
}
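
Both dma_map_sg() and dma_sync_sg() walk the scatterlist the same way: on non-coherent configurations each segment is pushed through dma_cache_sync() before its bus address is written into sg[i].dma_address. A rough driver-side sketch of how these helpers are reached through the generic DMA API (names and error handling are assumptions, not part of this patch):

/*
 * Hypothetical driver-side use of the scatterlist helpers; the "nents"
 * entries are assumed to have been initialised by the caller.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int example_map_rx(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (mapped == 0)
		return -EIO;

	/* ... let the hardware fill the buffers ... */

	/* make the received data visible to the CPU */
	dma_sync_sg_for_cpu(dev, sgl, mapped, DMA_FROM_DEVICE);
	return 0;
}
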
-static void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
- __attribute__ ((alias("dma_sync_single")));
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single(dev, dma_handle, size, dir);
+}
+
-static void dma_sync_single_for_device(struct device *dev,
-	dma_addr_t dma_handle, size_t size,
-	enum dma_data_direction dir)
-	__attribute__ ((alias("dma_sync_single")));
+static inline void dma_sync_single_for_device(struct device *dev,
+	dma_addr_t dma_handle,
+	size_t size,
+	enum dma_data_direction dir)
+{
+	dma_sync_single(dev, dma_handle, size, dir);
+}
+
-static void dma_sync_sg_for_cpu(struct device *dev,
-	struct scatterlist *sg, int nelems,
-	enum dma_data_direction dir)
-	__attribute__ ((alias("dma_sync_sg")));
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+	struct scatterlist *sg, int nelems,
+	enum dma_data_direction dir)
+{
+	dma_sync_sg(dev, sg, nelems, dir);
+}
+
-static void dma_sync_sg_for_device(struct device *dev,
-	struct scatterlist *sg, int nelems,
-	enum dma_data_direction dir)
-	__attribute__ ((alias("dma_sync_sg")));
+static inline void dma_sync_sg_for_device(struct device *dev,
+	struct scatterlist *sg, int nelems,
+	enum dma_data_direction dir)
+{
+	dma_sync_sg(dev, sg, nelems, dir);
+}
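
The __attribute__((alias)) declarations are replaced by static inline wrappers with the same behaviour: every *_for_cpu/*_for_device variant still funnels into dma_sync_single() or dma_sync_sg(), but without relying on the GCC-specific alias extension inside an inline header. A rough sketch of the usual pairing in a driver (the handle and length are assumed to come from an earlier dma_map_single() call; not part of this patch):

/*
 * Hypothetical streaming-DMA sequence, only to show how the wrappers
 * are meant to be paired.
 */
#include <linux/dma-mapping.h>

static void example_recycle_rx(struct device *dev, dma_addr_t handle, size_t len)
{
	/* let the CPU look at what the device wrote */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... process the data ... */

	/* give the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
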
static inline int dma_get_cache_alignment(void)
{
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
-
#endif /* __ASM_SH_DMA_MAPPING_H */
-