/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm/bug.h>

#ifdef __x86_64__
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

__init int iommu_setup(char *p)
{
	/* No IOMMU options are parsed on this port; accept and ignore. */
	return 1;
}
#endif

struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

#define IOMMU_BUG_ON(test)				\
do {							\
	if (unlikely(test)) {				\
		printk(KERN_ALERT "Fatal DMA error! "	\
		       "Please use 'swiotlb=force'\n");	\
		BUG();					\
	}						\
} while (0)

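/*
 * Note on IOMMU_BUG_ON: under Xen, a guest's pseudo-physical pages are not
 * guaranteed to be machine-contiguous or to lie below a device's DMA mask,
 * and without swiotlb there is no bounce buffer to fall back on.  The checks
 * below therefore BUG() rather than hand the device an unreachable address;
 * booting with 'swiotlb=force' routes streaming DMA through bounce buffers
 * instead.
 */
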
int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i, rc;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(nents == 0 || sg[0].length == 0);

	if (swiotlb) {
		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
	} else {
		for (i = 0; i < nents; i++) {
			sg[i].dma_address =
				page_to_bus(sg[i].page) + sg[i].offset;
			sg[i].dma_length  = sg[i].length;
			IOMMU_BUG_ON(address_needs_mapping(
				hwdev, sg[i].dma_address));
		}
		rc = nents;
	}

	flush_write_buffers();
	return rc;
}
EXPORT_SYMBOL(dma_map_sg);

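/*
 * Illustrative usage sketch (hypothetical driver, not part of the original
 * file): mapping a scatterlist for a device-to-memory transfer.  The entry
 * count returned by dma_map_sg() is what the driver iterates over;
 * program_desc() stands in for whatever writes the hardware descriptors.
 *
 *	int i, count = dma_map_sg(&pdev->dev, sg, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		program_desc(sg[i].dma_address, sg[i].dma_length);
 *	... run the transfer ...
 *	dma_unmap_sg(&pdev->dev, sg, nents, DMA_FROM_DEVICE);
 */
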
void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);

/*
 * XXX This file is also used by xenLinux/ia64.
 * "defined(__i386__) || defined (__x86_64__)" means "!defined(__ia64__)".
 * This #if workaround should be removed once this file is merged back into
 * i386's pci-dma or is moved to drivers/xen/core.
 */
#if defined(__i386__) || defined(__x86_64__)

dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	BUG_ON(direction == DMA_NONE);

	if (swiotlb) {
		dma_addr = swiotlb_map_page(
			dev, page, offset, size, direction);
	} else {
		dma_addr = page_to_bus(page) + offset;
		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
	}
	return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* defined(__i386__) || defined(__x86_64__) */

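/*
 * Note: under this Xen port, page_to_bus()/virt_to_bus() are expected to
 * resolve through the physical-to-machine mapping, so the addresses handed
 * to the device above are machine (bus) addresses, not the guest's
 * pseudo-physical ones.  That is also why dma_alloc_coherent() below asks
 * Xen for a machine-contiguous region rather than relying on
 * __get_free_pages() alone.
 */
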
int
dma_mapping_error(dma_addr_t dma_addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(dma_addr);
	return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(dev, mask);
	/*
	 * By default we'll BUG when an infeasible DMA is requested, and
	 * request swiotlb=force (see IOMMU_BUG_ON).
	 */
	return 1;
}
EXPORT_SYMBOL(dma_supported);

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (ret != NULL) {
		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

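/*
 * Illustrative usage sketch (hypothetical driver, not part of the original
 * file): allocating a descriptor ring that both CPU and device can access
 * without explicit syncing.  RING_BYTES is a made-up size constant.
 *
 *	void *ring;
 *	dma_addr_t ring_bus;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_bus, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... program ring_bus into the device, touch 'ring' from the CPU ...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_bus);
 */
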
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		xen_destroy_contiguous_region((unsigned long)vaddr, order);
		free_pages((unsigned long)vaddr, order);
	}
}
EXPORT_SYMBOL(dma_free_coherent);

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = (pages + 31)/32;

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;
	memset(dev->dma_mem->bitmap, 0, bitmap_size);

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	/* bitmap allocation failed: discard the partially set-up dma_mem */
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;
 out:
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

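/*
 * The declare/release/mark-occupied trio above lets a driver dedicate a
 * device-local window (for example, memory behind a PCI BAR) as the pool
 * that dma_alloc_coherent() draws from for that device instead of system
 * RAM.  A hypothetical sketch (bar_cpu_addr, bar_dev_addr and bar_len are
 * made-up names for the window's CPU address, device address and size):
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, bar_cpu_addr, bar_dev_addr,
 *					bar_len, DMA_MEMORY_MAP) == 0)
 *		return -ENOMEM;
 *	buf = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);
 *	... use buf / bus ...
 *	dma_release_declared_memory(&pdev->dev);
 */
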
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	dma_addr_t dma;

	if (direction == DMA_NONE)
		BUG();

	if (swiotlb) {
		dma = swiotlb_map_single(dev, ptr, size, direction);
	} else {
		dma = virt_to_bus(ptr);
		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
	}
	flush_write_buffers();
	return dma;
}
EXPORT_SYMBOL(dma_map_single);

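/*
 * Illustrative usage sketch (hypothetical, not from the original file):
 * streaming-mapping a kmalloc'ed buffer the device will read from.
 *
 *	dma_addr_t bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	... start the transfer using 'bus' ...
 *	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */
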
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
	if (swiotlb)
		swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

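/*
 * The sync pair above transfers ownership of a streaming mapping between
 * device and CPU: call dma_sync_single_for_cpu() before the CPU reads a
 * DMA_FROM_DEVICE buffer, and dma_sync_single_for_device() before handing
 * it back to the device.  On this port only swiotlb bounce buffers need
 * real work (copying to or from the bounce page); x86 is cache-coherent,
 * so the non-swiotlb path has nothing to do.
 */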