/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <asm/swiotlb.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm/bug.h>

#ifdef __x86_64__
#include <asm/proto.h>
#include <asm/calgary.h>

int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int iommu_sac_force __read_mostly = 0;
EXPORT_SYMBOL(iommu_sac_force);

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

void __init pci_iommu_alloc(void)
{
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
#ifdef CONFIG_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
#include <asm/calgary.h>
        /* shut up compiler */
        use_calgary = use_calgary;
        detect_calgary();
#endif

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}

static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
        calgary_iommu_init();
#endif

#ifdef CONFIG_IOMMU
        gart_iommu_init();
#endif

        no_iommu_init();
        return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#endif /* __x86_64__ */

struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

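/*
 * One dma_coherent_mem hangs off a struct device when a driver declares
 * device-local coherent memory (see dma_declare_coherent_memory() below);
 * 'bitmap' tracks the pool with one bit per page.
 */
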
#define IOMMU_BUG_ON(test)                              \
do {                                                    \
        if (unlikely(test)) {                           \
                printk(KERN_ALERT "Fatal DMA error! "   \
                       "Please use 'swiotlb=force'\n"); \
                BUG();                                  \
        }                                               \
} while (0)

int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i, rc;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sg[0].length == 0);

        if (swiotlb) {
                rc = swiotlb_map_sg(hwdev, sg, nents, direction);
        } else {
                for (i = 0; i < nents; i++) {
                        sg[i].dma_address =
                                page_to_bus(sg[i].page) + sg[i].offset;
                        sg[i].dma_length  = sg[i].length;
                        rc = nents;

                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
                }
        }

        flush_write_buffers();
        return rc;
}
EXPORT_SYMBOL(dma_map_sg);

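/*
 * Typical scatter-gather usage (an illustrative sketch; 'dev', 'sglist'
 * and 'nents' are assumed to be set up by the caller):
 *
 *	int n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (n == 0)
 *		return -EIO;
 *	... program the device from sglist[i].dma_address/dma_length ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
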
void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (swiotlb)
                swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);

/*
 * XXX This file is also used by xenLinux/ia64.
 * "defined(__i386__) || defined (__x86_64__)" means "!defined(__ia64__)".
 * This #if workaround should be removed once this file is merged back
 * into i386's pci-dma or is moved to drivers/xen/core.
 */
#if defined(__i386__) || defined(__x86_64__)
dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        dma_addr_t dma_addr;

        BUG_ON(!valid_dma_direction(direction));

        if (swiotlb) {
                dma_addr = swiotlb_map_page(
                        dev, page, offset, size, direction);
        } else {
                dma_addr = page_to_bus(page) + offset;
                IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
        }

        return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (swiotlb)
                swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* defined(__i386__) || defined(__x86_64__) */

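/*
 * Sketch of a page-granular streaming mapping (illustrative only; 'dev'
 * and 'page' come from the caller):
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_FROM_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	... device DMAs into the page ...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_FROM_DEVICE);
 */
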
int
dma_mapping_error(dma_addr_t dma_addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(dma_addr);
        return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(dev, mask);
        /*
         * By default we'll BUG when an infeasible DMA is requested, and
         * request swiotlb=force (see IOMMU_BUG_ON).
         */
        return 1;
}
EXPORT_SYMBOL(dma_supported);

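/*
 * Drivers normally reach this through dma_set_mask()/pci_set_dma_mask()
 * rather than calling it directly; a hedged example for a PCI device
 * 'pdev' (assumed):
 *
 *	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
 *		return -ENODEV;
 */
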
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        unsigned int order = get_order(size);
        unsigned long vstart;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        vstart = __get_free_pages(gfp, order);
        ret = (void *)vstart;

        if (ret != NULL) {
                if (xen_create_contiguous_region(vstart, order,
                                                 IO_TLB_DMA_BITS) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

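/*
 * Illustrative use for a small descriptor ring ('RING_BYTES' and the
 * surrounding driver are hypothetical):
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_bus to the device, touch 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 *
 * Under Xen the allocation is also made machine-contiguous via
 * xen_create_contiguous_region() above, so 'ring_bus' is usable for
 * real bus-master DMA.
 */
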
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else {
                xen_destroy_contiguous_region((unsigned long)vaddr, order);
                free_pages((unsigned long)vaddr, order);
        }
}
EXPORT_SYMBOL(dma_free_coherent);

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = (pages + 31)/32;

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem->bitmap);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

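/*
 * Hedged example: exposing 64KB of device-local memory at bus address
 * 'bar_phys' (hypothetical) as the device's coherent pool:
 *
 *	if (dma_declare_coherent_memory(dev, bar_phys, bar_phys, 0x10000,
 *					DMA_MEMORY_MAP) != DMA_MEMORY_MAP)
 *		return -ENOMEM;
 *	... dma_alloc_coherent() on 'dev' now carves from this pool ...
 *	dma_release_declared_memory(dev);
 */
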
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

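/*
 * Sketch: reserving a fixed device address within the declared pool
 * ('dev_addr' and 'len' are hypothetical):
 *
 *	void *va = dma_mark_declared_memory_occupied(dev, dev_addr, len);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */
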
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        dma_addr_t dma;

        BUG_ON(!valid_dma_direction(direction));

        if (swiotlb) {
                dma = swiotlb_map_single(dev, ptr, size, direction);
        } else {
                dma = virt_to_bus(ptr);
                IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }

        flush_write_buffers();
        return dma;
}
EXPORT_SYMBOL(dma_map_single);

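/*
 * The canonical streaming-DMA pattern (illustrative; 'dev', 'buf' and
 * 'len' are assumed):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	... start the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * The page-boundary check above matters under Xen: a buffer that is
 * virtually contiguous may still straddle machine-discontiguous pages,
 * which is exactly what range_straddles_page_boundary() catches.
 */
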
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (swiotlb)
                swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
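
/*
 * Hedged example of recycling one streaming mapping across transfers
 * (only meaningful when swiotlb bounce buffers are active; otherwise
 * these calls are no-ops in this implementation):
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */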