/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
 */

#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/pci.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <xen/interface/memory.h>

int swiotlb;
EXPORT_SYMBOL(swiotlb);

#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))

#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_bus((sg)->page) + (sg)->offset)

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

/* Width of DMA addresses in the IO TLB. 31 bits is an aacraid limitation. */
#define IO_TLB_DMA_BITS 31

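/*
 * Geometry note: with IO_TLB_SHIFT == 11 each slab is 2KB, so a single
 * bounce mapping can cover at most IO_TLB_SEGSIZE * 2KB = 256KB of
 * contiguous aperture.  The aperture is kept machine-contiguous below the
 * 31-bit boundary so that bounce buffers remain reachable by devices with
 * narrow DMA masks.
 */
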
static int swiotlb_force;

static char *iotlb_virt_start;
static unsigned long iotlb_nslabs;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static unsigned long iotlb_pfn_start, iotlb_pfn_end;

/* Does the given dma address reside within the swiotlb aperture? */
static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
{
	unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
	return (pfn_valid(pfn)
		&& (pfn >= iotlb_pfn_start)
		&& (pfn < iotlb_pfn_end));
}

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index.
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static struct phys_addr {
	struct page *page;
	unsigned int offset;
} *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls.
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	/* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */
	if (isdigit(*str)) {
		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
			(20 - IO_TLB_SHIFT);
		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
		/* Round up to power of two (xen_create_contiguous_region). */
		while (iotlb_nslabs & (iotlb_nslabs-1))
			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
	}
	if (*str == ',')
		++str;
	/*
	 * NB. 'force' enables the swiotlb, but doesn't force its use for
	 * every DMA like it does on native Linux. 'off' forcibly disables
	 * use of the swiotlb.
	 */
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	else if (!strcmp(str, "off"))
		swiotlb_force = -1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */

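/*
 * Example (illustrative): booting with "swiotlb=64,force" requests a 64
 * megabyte aperture and enables the swiotlb even where it would not be on
 * by default, while "swiotlb=off" disables it entirely.  The megabyte
 * count becomes slabs via the (20 - IO_TLB_SHIFT) shift above,
 * i.e. 64 << 9 = 32768 slabs.
 */
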
/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the PCI DMA API.
 */
void
swiotlb_init_with_default_size (size_t default_size)
{
	unsigned long i, bytes;

	if (!iotlb_nslabs) {
		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
		/* Round up to power of two (xen_create_contiguous_region). */
		while (iotlb_nslabs & (iotlb_nslabs-1))
			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
	}

	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);

	/*
	 * Get IO TLB memory from the low pages.
	 */
	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
	if (!iotlb_virt_start)
		panic("Cannot allocate SWIOTLB buffer!\n"
		      "Use dom0_mem Xen boot parameter to reserve\n"
		      "some DMA memory (e.g., dom0_mem=-128M).\n");

	for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
		int rc = xen_create_contiguous_region(
			(unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
			get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
			IO_TLB_DMA_BITS);
		BUG_ON(rc);
	}

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
	 */
	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
	for (i = 0; i < iotlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(
		iotlb_nslabs * sizeof(*io_tlb_orig_addr));

	/*
	 * Get the overflow emergency buffer.
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);

	iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
	iotlb_pfn_end   = iotlb_pfn_start + (bytes >> PAGE_SHIFT);

	printk(KERN_INFO "Software IO TLB enabled: \n"
	       " Aperture:     %lu megabytes\n"
	       " Kernel range: 0x%016lx - 0x%016lx\n",
	       bytes >> 20,
	       (unsigned long)iotlb_virt_start,
	       (unsigned long)iotlb_virt_start + bytes);
}

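/*
 * Sizing note: the 64MB default used below becomes 64MB >> IO_TLB_SHIFT =
 * 32768 slabs, which is already IO_TLB_SEGSIZE-aligned and a power of two,
 * so the rounding above leaves it unchanged.
 */
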
void
swiotlb_init(void)
{
	long ram_end;
	size_t defsz = 64 * (1 << 20); /* 64MB default size */

	if (swiotlb_force == 1) {
		swiotlb = 1;
	} else if ((swiotlb_force != -1) &&
		   is_running_on_xen() &&
		   is_initial_xendomain()) {
		/* Domain 0 always has a swiotlb. */
		ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
		if (ram_end <= 0x7ffff)
			defsz = 2 * (1 << 20); /* 2MB on systems with <2GB RAM. */
		swiotlb = 1;
	}

	if (swiotlb)
		swiotlb_init_with_default_size(defsz);
	else
		printk(KERN_INFO "Software IO TLB disabled\n");
}

/*
 * We use __copy_to_user_inatomic to transfer to the host buffer because the
 * buffer may be mapped read-only (e.g., in blkback driver) but lower-level
 * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
 * unnecessary copy from the aperture to the host buffer, and a page fault.
 */
static void
__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
{
	if (PageHighMem(buffer.page)) {
		size_t len, bytes;
		char *dev, *host, *kmp;

		len = size;
		while (len != 0) {
			if (((bytes = len) + buffer.offset) > PAGE_SIZE)
				bytes = PAGE_SIZE - buffer.offset;
			kmp  = kmap_atomic(buffer.page, KM_SWIOTLB);
			dev  = dma_addr + size - len;
			host = kmp + buffer.offset;
			if (dir == DMA_FROM_DEVICE) {
				if (__copy_to_user_inatomic(host, dev, bytes))
					/* inaccessible */;
			} else
				memcpy(dev, host, bytes);
			kunmap_atomic(kmp, KM_SWIOTLB);
			len -= bytes;
			buffer.page++;
			buffer.offset = 0;
		}
	} else {
		char *host = (char *)phys_to_virt(
			page_to_pseudophys(buffer.page)) + buffer.offset;

		if (dir == DMA_FROM_DEVICE) {
			if (__copy_to_user_inatomic(host, dma_addr, size))
				/* inaccessible */;
		} else if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, host, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	wrap = index = ALIGN(io_tlb_index, stride);
	if (index >= iotlb_nslabs)
		wrap = index = 0;
	do {
		/*
		 * If we find a slot that indicates we have 'nslots'
		 * number of contiguous buffers, we allocate the
		 * buffers from that slot and mark the entries as '0'
		 * indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int)(index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1;
			     (OFFSET(i, IO_TLB_SEGSIZE) !=
			      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
			     i--)
				io_tlb_list[i] = ++count;
			dma_addr = iotlb_virt_start +
				(index << IO_TLB_SHIFT);
			/*
			 * Update the indices to avoid searching in
			 * the next round.
			 */
			io_tlb_index =
				((index + nslots) < iotlb_nslabs
				 ? (index + nslots) : 0);
			goto found;
		}
		index += stride;
		if (index >= iotlb_nslabs)
			index = 0;
	} while (index != wrap);

	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;

found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	io_tlb_orig_addr[index] = buffer;
	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

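/*
 * Free-list bookkeeping example (illustrative): with a fresh table,
 * io_tlb_list[i] holds the number of free slabs from slot i to the end of
 * its 128-slab segment.  Allocating a 6KB buffer (3 slabs) at index 5
 * zeroes entries 5..7 and rewrites entries 4, 3, ... downwards as 1, 2, ...
 * so a later search below index 5 still sees how much room remains before
 * the allocated run.
 */
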
/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
	struct phys_addr buffer = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry.
	 */
	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
		 io_tlb_list[index + nslots] : 0);
	/*
	 * Step 1: return the slots to the free list, merging the
	 * slots with succeeding slots.
	 */
	for (i = index + nslots - 1; i >= index; i--)
		io_tlb_list[i] = ++count;
	/*
	 * Step 2: merge the returned slots with the preceding slots,
	 * if available (non zero).
	 */
	for (i = index - 1;
	     (OFFSET(i, IO_TLB_SEGSIZE) !=
	      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
	     i--)
		io_tlb_list[i] = ++count;
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
	struct phys_addr buffer = io_tlb_orig_addr[index];

	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
	__sync_single(buffer, dma_addr, size, dir);
}

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this failure properly
	 * unless they check for pci_dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
	       "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * PCI address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
	dma_addr_t dev_addr = virt_to_bus(ptr);
	void *map;
	struct phys_addr buffer;

	BUG_ON(dir == DMA_NONE);

	/*
	 * If the pointer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!range_straddles_page_boundary(ptr, size) &&
	    !address_needs_mapping(hwdev, dev_addr))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	buffer.page   = virt_to_page(ptr);
	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
	map = map_single(hwdev, buffer, size, dir);
	if (!map) {
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = virt_to_bus(map);
	return dev_addr;
}

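/*
 * Illustrative driver-side sketch (assumed usage, not part of this file):
 * callers normally reach swiotlb_map_single() through the generic DMA API,
 * e.g.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -ENOMEM;
 *	... let the device DMA from 'bus', then ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * which this Xen configuration is expected to route to the swiotlb_*
 * entry points whenever the swiotlb is enabled.
 */
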
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
		     int dir)
{
	BUG_ON(dir == DMA_NONE);
	if (in_swiotlb_aperture(dev_addr))
		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
 * call this function before doing so.  At the next point you give the PCI dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, int dir)
{
	BUG_ON(dir == DMA_NONE);
	if (in_swiotlb_aperture(dev_addr))
		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	BUG_ON(dir == DMA_NONE);
	if (in_swiotlb_aperture(dev_addr))
		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
}

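/*
 * Illustrative sketch (assumed usage): a driver that wants to peek at a
 * still-mapped receive buffer would do
 *
 *	swiotlb_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... CPU reads the data at 'buf' ...
 *	swiotlb_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *
 * handing ownership back to the device before it may DMA into the buffer
 * again.
 */
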
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
	       int dir)
{
	struct phys_addr buffer;
	dma_addr_t dev_addr;
	char *map;
	int i;

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++) {
		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (address_needs_mapping(hwdev, dev_addr)) {
			buffer.page   = sg->page;
			buffer.offset = sg->offset;
			map = map_single(hwdev, buffer, sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
				sg[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = (dma_addr_t)virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}

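/*
 * Illustrative sketch (assumed usage): a caller of the scatter-gather path
 * programs the device from the returned pairs, e.g.
 *
 *	int i, n = swiotlb_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < n; i++)
 *		program_desc(sg_dma_address(&sglist[i]),
 *			     sg_dma_length(&sglist[i]));
 *
 * where program_desc() is a hypothetical device-specific helper.  A return
 * value of 0 means the mapping failed and nothing may be programmed.
 */
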
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
		 int dir)
{
	int i;

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			unmap_single(hwdev,
				     (void *)bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, int dir)
{
	int i;

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev,
				    (void *)bus_to_virt(sg->dma_address),
				    sg->dma_length, dir);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, int dir)
{
	int i;

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev,
				    (void *)bus_to_virt(sg->dma_address),
				    sg->dma_length, dir);
}

dma_addr_t
swiotlb_map_page(struct device *hwdev, struct page *page,
		 unsigned long offset, size_t size,
		 enum dma_data_direction direction)
{
	struct phys_addr buffer;
	dma_addr_t dev_addr;
	char *map;

	dev_addr = page_to_bus(page) + offset;
	if (address_needs_mapping(hwdev, dev_addr)) {
		buffer.page   = page;
		buffer.offset = offset;
		map = map_single(hwdev, buffer, size, direction);
		if (!map) {
			swiotlb_full(hwdev, size, direction, 1);
			map = io_tlb_overflow_buffer;
		}
		dev_addr = (dma_addr_t)virt_to_bus(map);
	}

	return dev_addr;
}

void
swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
		   size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (in_swiotlb_aperture(dma_address))
		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
}

int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}

/*
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported (struct device *hwdev, u64 mask)
{
	return (mask >= ((1UL << IO_TLB_DMA_BITS) - 1));
}

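/*
 * Example: a device advertising a 32-bit mask (0xffffffff) is accepted
 * because it covers the 31-bit aperture limit, while a 24-bit ISA-style
 * mask (0x00ffffff) is rejected since bounce buffers may lie above 16MB.
 */
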
EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_map_page);
EXPORT_SYMBOL(swiotlb_unmap_page);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_dma_supported);