/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm/bug.h>

#ifdef __x86_64__
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int iommu_sac_force __read_mostly = 0;
EXPORT_SYMBOL(iommu_sac_force);

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

void __init pci_iommu_alloc(void)
{
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
#include <asm/calgary.h>
	detect_calgary();
#endif

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}

__init int iommu_setup(char *p)
{
	return 1;
}
#endif /* __x86_64__ */

struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

#define IOMMU_BUG_ON(test)				\
do {							\
	if (unlikely(test)) {				\
		printk(KERN_ALERT "Fatal DMA error! "	\
		       "Please use 'swiotlb=force'\n");	\
		BUG();					\
	}						\
} while (0)

int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i, rc;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(nents == 0 || sg[0].length == 0);

	if (swiotlb) {
		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
	} else {
		for (i = 0; i < nents; i++) {
			sg[i].dma_address =
				page_to_bus(sg[i].page) + sg[i].offset;
			sg[i].dma_length  = sg[i].length;
			BUG_ON(!sg[i].page);
			IOMMU_BUG_ON(address_needs_mapping(
				hwdev, sg[i].dma_address));
		}
		rc = nents;
	}

	flush_write_buffers();
	return rc;
}
EXPORT_SYMBOL(dma_map_sg);

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);
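
/*
 * Example usage (illustrative sketch only; `dev', `buf' and `len' are
 * hypothetical driver state, and error handling is elided):
 *
 *	struct scatterlist sg[1];
 *	int nents;
 *
 *	sg[0].page   = virt_to_page(buf);
 *	sg[0].offset = offset_in_page(buf);
 *	sg[0].length = len;
 *	nents = dma_map_sg(dev, sg, 1, DMA_TO_DEVICE);
 *	... program the device with sg[0].dma_address/sg[0].dma_length ...
 *	dma_unmap_sg(dev, sg, 1, DMA_TO_DEVICE);
 */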

/*
 * XXX This file is also used by xenLinux/ia64.
 * "defined(__i386__) || defined(__x86_64__)" means "!defined(__ia64__)".
 * This #if workaround should be removed once this file is merged back
 * into i386's pci-dma or is moved to drivers/xen/core.
 */
#if defined(__i386__) || defined(__x86_64__)
dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	BUG_ON(direction == DMA_NONE);

	if (swiotlb) {
		dma_addr = swiotlb_map_page(
			dev, page, offset, size, direction);
	} else {
		dma_addr = page_to_bus(page) + offset;
		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* defined(__i386__) || defined(__x86_64__) */
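
/*
 * Example usage (illustrative sketch; `dev' and `pg' are hypothetical):
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	... device DMAs into the page ...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_FROM_DEVICE);
 */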

int
dma_mapping_error(dma_addr_t dma_addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(dma_addr);
	return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(dev, mask);
	/*
	 * By default we'll BUG when an infeasible DMA is requested, and
	 * request swiotlb=force (see IOMMU_BUG_ON).
	 */
	return 1;
}
EXPORT_SYMBOL(dma_supported);
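
/*
 * Example usage (illustrative sketch): drivers normally reach this
 * function through dma_set_mask(), which rejects an infeasible mask
 * with -EIO before any mapping is attempted:
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 */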

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (ret != NULL) {
		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		xen_destroy_contiguous_region((unsigned long)vaddr, order);
		free_pages((unsigned long)vaddr, order);
	}
}
EXPORT_SYMBOL(dma_free_coherent);
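
/*
 * Example usage (illustrative sketch; `dev', and the choice of PAGE_SIZE,
 * are hypothetical):
 *
 *	void *vaddr;
 *	dma_addr_t bus;
 *
 *	vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (vaddr == NULL)
 *		return -ENOMEM;
 *	... program the device with `bus', access the buffer via `vaddr' ...
 *	dma_free_coherent(dev, PAGE_SIZE, vaddr, bus);
 */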

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = (pages + 31) / 32;

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;
	memset(dev->dma_mem->bitmap, 0, bitmap_size);

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	/* The bitmap allocation failed: free the descriptor, not the bitmap. */
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
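
/*
 * Example usage (illustrative sketch; `dev', `BUS_ADDR' and `REGION_SIZE'
 * are hypothetical). Once declared, dma_alloc_coherent() on `dev' carves
 * allocations out of this region first (see above):
 *
 *	if (dma_declare_coherent_memory(dev, BUS_ADDR, BUS_ADDR,
 *					REGION_SIZE, DMA_MEMORY_MAP)
 *	    != DMA_MEMORY_MAP)
 *		return -ENODEV;
 */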

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1)
		    >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	dma_addr_t dma;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(size == 0);

	if (swiotlb) {
		dma = swiotlb_map_single(dev, ptr, size, direction);
	} else {
		dma = virt_to_bus(ptr);
		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
	}

	flush_write_buffers();
	return dma;
}
EXPORT_SYMBOL(dma_map_single);

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
	if (swiotlb)
		swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);
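
/*
 * Example usage (illustrative sketch; `dev', `buf' and `len' are
 * hypothetical). Note that dma_mapping_error() here takes only the
 * returned bus address:
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	... device reads `len' bytes at `bus' ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */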

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
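
/*
 * Example usage of the sync pair (illustrative sketch; `dev', `buf',
 * `bus' and `len' are hypothetical). A buffer that stays mapped across
 * several device operations is handed back and forth explicitly:
 *
 *	bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read `buf' ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... the device may write again ...
 *	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
 */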