#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/device.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

/*
 * Coherent allocations: on i386 all memory is cache-coherent with DMA,
 * so the noncoherent variants above simply alias these.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
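
/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * allocate a coherent buffer for a descriptor ring at probe time and
 * free it on teardown.  RING_BYTES and ring_dma are illustrative names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */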

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
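
/*
 * Usage sketch (buf and len are illustrative): map a kmalloc'ed,
 * physically contiguous buffer for a one-shot transfer to the device.
 * On i386 the mapping is just virt_to_phys() plus a write-buffer flush.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program the device to DMA from 'handle' and wait ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */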

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
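
/*
 * Sketch (illustrative values): map a region inside an existing page,
 * e.g. file data sitting in a page cache page, for a device read.
 *
 *	dma_addr_t d = dma_map_page(dev, page, offset, len,
 *				    DMA_TO_DEVICE);
 *	... start the transfer ...
 *	dma_unmap_page(dev, d, len, DMA_TO_DEVICE);
 */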

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
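
/*
 * Sketch of scatter-gather usage; program_segment() is a made-up
 * device-specific helper, while sg_dma_address()/sg_dma_len() are the
 * standard scatterlist accessors.
 *
 *	int i, n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < n; i++)
 *		program_segment(dev, sg_dma_address(&sglist[i]),
 *				sg_dma_len(&sglist[i]));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */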

/*
 * i386 DMA is cache-coherent, so the for_cpu syncs have nothing to do;
 * the for_device syncs only need to flush the CPU write buffers.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}
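
/*
 * Usage sketch (handle and len are illustrative): reuse a streaming
 * mapping across transfers instead of unmapping and remapping.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... hand the buffer back to the device for the next receive ...
 */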

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	/* mappings can never fail on i386 */
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
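
/*
 * Typical probe-time usage (sketch; the error path is illustrative):
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 */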

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << L1_CACHE_SHIFT_MAX);
}
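
/*
 * Sketch (struct my_desc is hypothetical): pad per-buffer metadata so
 * DMA'd data never shares a cache line with CPU-written fields.
 *
 *	size_t slot = ALIGN(sizeof(struct my_desc),
 *			    dma_get_cache_alignment());
 */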

#define dma_is_consistent(d)	(1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#endif