#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/config.h>
#include <linux/mm.h>		/* page_address(), page_to_phys() */
#include <linux/device.h>
#include <asm/scatterlist.h>
#include <asm/io.h>		/* virt_to_bus(), bus_to_virt() */
#include <asm/machvec.h>	/* sh_mv */

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

#ifdef CONFIG_SH_DREAMCAST
struct pci_dev;
extern struct bus_type pci_bus_type;
extern void *__pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
				    dma_addr_t *dma_handle);
extern void __pci_free_consistent(struct pci_dev *hwdev, size_t size,
				  void *vaddr, dma_addr_t dma_handle);
#endif

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
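
/*
 * Usage sketch (hypothetical driver code, not part of this header): a
 * probe routine would typically set the mask before any mapping call.
 * "pdev" and the 32-bit mask below are assumptions for illustration.
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 */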

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, int flag)
{
	/*
	 * Some platforms have special pci_alloc_consistent() implementations,
	 * in these instances we can't use the generic consistent_alloc().
	 */
#ifdef CONFIG_SH_DREAMCAST
	if (dev && dev->bus == &pci_bus_type)
		return __pci_alloc_consistent(NULL, size, dma_handle);
#endif
	if (sh_mv.mv_consistent_alloc)
		return sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);

	return consistent_alloc(flag, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	/*
	 * Same note as above applies to pci_free_consistent().
	 */
#ifdef CONFIG_SH_DREAMCAST
	if (dev && dev->bus == &pci_bus_type) {
		__pci_free_consistent(NULL, size, vaddr, dma_handle);
		return;
	}
#endif
	if (sh_mv.mv_consistent_free) {
		sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		return;
	}

	consistent_free(vaddr, size);
}
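
/*
 * Usage sketch for the coherent API (hypothetical driver code; the
 * names and ring size are assumptions): allocate a descriptor ring the
 * CPU and device share without explicit sync, free it on teardown.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */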

static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
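
/*
 * Streaming-mapping sketch (hypothetical caller; "buf" and "len" are
 * assumptions): map for a single transfer, run the device, then unmap.
 * On this port dma_unmap_single() is a no-op, but callers should still
 * pair it with the map for portability.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... start the device on "addr", wait for completion ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */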

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
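
/*
 * Scatter-gather sketch (hypothetical caller; "sglist", "nents" and
 * program_segment() are assumptions): after dma_map_sg(), the device
 * address and length of each segment come from the entries themselves.
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		program_segment(sglist[i].dma_address, sglist[i].length);
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */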

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}
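
/*
 * Re-sync sketch (hypothetical caller): when a streaming buffer is
 * reused for another transfer without remapping, sync it again first.
 *
 *	dma_sync_single(dev, addr, len, DMA_TO_DEVICE);
 *	... restart the device on the same mapping ...
 */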

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

/*
 * gcc cannot reliably alias static inline functions, so the _for_cpu
 * and _for_device variants are plain wrappers around dma_sync_single()
 * and dma_sync_sg().
 */
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
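
/*
 * Ownership sketch (hypothetical caller): bracket CPU access to a live
 * streaming mapping with the for_cpu/for_device pair.
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 */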

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
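
/*
 * Alignment sketch (hypothetical caller): pad streaming buffers to the
 * cache line size so unrelated data never shares a flushed or
 * invalidated line with the DMA region.
 *
 *	size_t sz = ALIGN(len, dma_get_cache_alignment());
 */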

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */