#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
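
/*
 * Illustrative sketch only, not part of this header: a driver would
 * typically allocate a coherent buffer once at init time and free it
 * on teardown.  "dev", "ring" and RING_BYTES are hypothetical names:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */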

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	flush_write_buffers();
	/* No IOMMU on i386: the bus address is just the physical address. */
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
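
/*
 * Illustrative sketch only: a streaming mapping for a single
 * CPU-to-device transfer.  "dev", "buf" and "len" are hypothetical:
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program the device with "bus" and run the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * The CPU must not touch "buf" between map and unmap.
 */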

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
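
/*
 * Illustrative sketch only: mapping one page for device-to-memory
 * DMA.  "dev", "page" and "len" are hypothetical:
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
 *	... let the device fill the page ...
 *	dma_unmap_page(dev, bus, len, DMA_FROM_DEVICE);
 */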

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
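
/*
 * Illustrative sketch only: mapping a scatterlist and walking the
 * mapped entries.  "dev", "sg" and "nents" are hypothetical; note
 * that dma_unmap_sg() takes the original nents, not the count
 * returned by dma_map_sg():
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++)
 *		... hand sg[i].dma_address and sg[i].length to the device ...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */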

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	/* i386 DMA is cache-coherent: nothing to do for the CPU side. */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}
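
/*
 * Illustrative sketch only: a buffer that stays mapped across several
 * transfers is handed back and forth with the sync calls.  "dev",
 * "bus", "buf" and "len" are hypothetical:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read buf ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... ownership is back with the device ...
 */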

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	/* Mappings are plain physical addresses here and cannot fail. */
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA provides.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
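
/*
 * Illustrative sketch only: a probe routine would negotiate the
 * device's addressing capability before creating any mappings.
 * "dev" is hypothetical; 0xffffffff is a plain 32-bit mask:
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 */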

static inline int
dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache size on all x86
	 * variants, so return the maximum possible to be safe.
	 */
	return (1 << L1_CACHE_SHIFT_MAX);
}

#define dma_is_consistent(d)	(1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#endif /* _ASM_I386_DMA_MAPPING_H */