#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

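/*
 * The coherent allocator lives out of line (only its prototypes appear
 * here); the streaming-DMA helpers below are trivial enough to be inlined.
 */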
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

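/*
 * There is no IOMMU on this path, so mapping a buffer for streaming DMA
 * just hands back its physical address; the write buffers are flushed so
 * that pending CPU stores are visible to the device.
 */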
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

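/*
 * dma_map_sg() is the per-entry version of dma_map_single(): each entry's
 * dma_address is the physical address of its page plus the offset, and the
 * entry count is returned unchanged.
 */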
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

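/*
 * dma_map_page() derives the bus address from the page frame number, so it
 * also works for highmem pages that have no kernel virtual address.
 */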
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

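/*
 * DMA is cache-coherent on i386, so syncing back to the CPU is a no-op and
 * the *_for_device variants only have to drain the CPU write buffers before
 * the device looks at memory.
 */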
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

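/*
 * Mappings are plain physical addresses and cannot fail, so there is never
 * an error value to report.
 */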
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

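/*
 * Record the new mask only if the device actually has a mask to update and
 * dma_supported() accepts it; callers must check the return value.
 */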
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << L1_CACHE_SHIFT_MAX);
}

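/*
 * All memory is consistent on this architecture, so dma_cache_sync() only
 * needs to drain the write buffers.
 */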
#define dma_is_consistent(d)	(1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#endif