/*
 * This is based on both include/asm-sh/dma-mapping.h and
 * include/asm-ppc/pci.h
 */
5 #ifndef __ASM_PPC_DMA_MAPPING_H
6 #define __ASM_PPC_DMA_MAPPING_H
8 #include <linux/config.h>
/* we implement the API below in terms of the existing PCI one,
 * so include it */
#include <linux/pci.h>
12 /* need struct page definitions */
14 #include <linux/device.h>
15 #include <asm/scatterlist.h>
/* Every mask is accepted: this platform can DMA to any address, so
 * dma_supported() is a constant-true macro that ignores its arguments. */
#define dma_supported(dev, mask) (1)
20 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
22 if (!dev->dma_mask || !dma_supported(dev, mask))
25 *dev->dma_mask = dma_mask;
30 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
31 dma_addr_t * dma_handle, int flag)
34 if (dev && dev->bus == &pci_bus_type)
35 return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
38 return consistent_alloc(flag, size, dma_handle);
42 dma_free_coherent(struct device *dev, size_t size, void *vaddr,
43 dma_addr_t dma_handle)
46 if (dev && dev->bus == &pci_bus_type) {
47 pci_free_consistent(to_pci_dev(dev), size, vaddr, dma_handle);
52 consistent_free(vaddr);
55 static inline dma_addr_t
56 dma_map_single(struct device *dev, void *ptr, size_t size,
57 enum dma_data_direction direction)
59 BUG_ON(direction == DMA_NONE);
61 consistent_sync(ptr, size, direction);
63 return virt_to_bus(ptr);
/* Unmapping is a no-op: dma_map_single() allocates no resources. */
#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
69 static inline dma_addr_t
70 dma_map_page(struct device *dev, struct page *page,
71 unsigned long offset, size_t size,
72 enum dma_data_direction direction)
74 BUG_ON(direction == DMA_NONE);
75 consistent_sync_page(page, offset, size, direction);
76 return (page - mem_map) * PAGE_SIZE + PCI_DRAM_OFFSET + offset;
/* Unmapping is a no-op: dma_map_page() allocates no resources. */
#define dma_unmap_page(dev, addr, size, dir) do { } while (0)
83 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
84 enum dma_data_direction direction)
88 BUG_ON(direction == DMA_NONE);
90 for (i = 0; i < nents; i++, sg++) {
92 consistent_sync_page(sg->page, sg->offset,
93 sg->length, direction);
94 sg->dma_address = page_to_bus(sg->page) + sg->offset;
/* We don't do anything here: streaming sg mappings need no teardown. */
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
104 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
106 enum dma_data_direction direction)
108 BUG_ON(direction == DMA_NONE);
110 consistent_sync(bus_to_virt(dma_handle), size, direction);
114 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
116 enum dma_data_direction direction)
118 BUG_ON(direction == DMA_NONE);
120 consistent_sync(bus_to_virt(dma_handle), size, direction);
124 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
125 int nelems, enum dma_data_direction direction)
129 BUG_ON(direction == DMA_NONE);
131 for (i = 0; i < nelems; i++, sg++)
132 consistent_sync_page(sg->page, sg->offset,
133 sg->length, direction);
137 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
138 int nelems, enum dma_data_direction direction)
142 BUG_ON(direction == DMA_NONE);
144 for (i = 0; i < nelems; i++, sg++)
145 consistent_sync_page(sg->page, sg->offset,
146 sg->length, direction);
149 /* Now for the API extensions over the pci_ one */
/* The noncoherent API simply aliases the coherent one: every allocation
 * this header hands out is consistent. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1)
155 static inline int dma_get_cache_alignment(void)
158 * Each processor family will define its own L1_CACHE_SHIFT,
159 * L1_CACHE_BYTES wraps to this, so this is always safe.
161 return L1_CACHE_BYTES;
165 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
166 unsigned long offset, size_t size,
167 enum dma_data_direction direction)
169 /* just sync everything, that's all the pci API can do */
170 dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
174 dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
175 unsigned long offset, size_t size,
176 enum dma_data_direction direction)
178 /* just sync everything, that's all the pci API can do */
179 dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
182 static inline void dma_cache_sync(void *vaddr, size_t size,
183 enum dma_data_direction direction)
185 consistent_sync(vaddr, size, (int)direction);
188 static inline int dma_mapping_error(dma_addr_t dma_addr)
193 #endif /* __ASM_PPC_DMA_MAPPING_H */