/*
 * Dynamic DMA mapping support.
 *
 * This implementation is for IA-64 platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 *
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 */
#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#define OFFSET(val, align) \
        ((unsigned long) ((val) & ((align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128
/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
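/*
 * With IO_TLB_SHIFT == 11 each slab is 2 KB, so a single mapping can use at
 * most IO_TLB_SEGSIZE * 2 KB = 256 KB of contiguous bounce space.
 */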
/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;
/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs = 32768;
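/*
 * With the defaults above (32768 slabs of 2 KB each) the bounce pool
 * occupies 64 MB of low memory.
 */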
/*
 * Set to non-zero via "swiotlb=force" to bounce every mapping even when the
 * device could address the buffer directly.
 */
static int swiotlb_force;

/*
 * When the IOMMU overflows we return a fallback buffer.  This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;
/*
 * This is a free list describing the number of free entries available from
 * each index.
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
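/*
 * In other words: io_tlb_list[i] holds the number of contiguous free slabs
 * starting at slab i, counted only up to the next IO_TLB_SEGSIZE boundary.
 * A freshly initialized segment looks like
 *
 *	io_tlb_list[0..127] = { 128, 127, 126, ..., 2, 1 }
 *
 * and an allocated slab is marked with 0.
 */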
/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static unsigned char **io_tlb_orig_addr;
/*
 * Protect the above data structures in the map and unmap calls.
 */
static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;
static int __init
setup_io_tlb_npages (char *str)
{
        io_tlb_nslabs = simple_strtoul(str, &str, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);
        /* avoid tail segment of size < IO_TLB_SEGSIZE */
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
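/*
 * For example (illustrative values; the exact slab count depends on
 * PAGE_SHIFT): booting with "swiotlb=16384" asks for 16384 pages worth of
 * bounce slabs, i.e. 16384 << (PAGE_SHIFT - IO_TLB_SHIFT) slabs, and the
 * keyword "force" sets swiotlb_force so that every mapping is bounced even
 * when the device could address the buffer directly.
 */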
/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the PCI DMA API.
 */
void
swiotlb_init (void)
{
        unsigned long i;

        /*
         * Get IO TLB memory from the low pages.
         */
        io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);

        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

        /*
         * Get the overflow emergency buffer.
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
        printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
               virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
}
/*
 * Returns non-zero if the given bus address is outside the device's DMA mask
 * and therefore has to be bounced.
 */
static inline int
address_needs_mapping (struct device *hwdev, dma_addr_t addr)
{
        dma_addr_t mask = 0xffffffff;

        if (hwdev && hwdev->dma_mask)
                mask = *hwdev->dma_mask;
        return (addr & ~mask) != 0;
}
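/*
 * For example, with a 32-bit dma_mask (0xffffffff) a buffer at bus address
 * 0x00000000f0000000 is reachable directly (no bounce needed), while
 * 0x0000000120000000 has bits set above the mask and must be bounced through
 * the IO TLB.
 */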
/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single (struct device *hwdev, char *buffer, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;

        /*
         * For mappings greater than a page size, we limit the stride (and hence
         * alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > (1 << PAGE_SHIFT))
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;
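        /*
         * For example (illustrative numbers), a 5000-byte request rounds up to
         * nslots = 3 slabs, i.e. 6 KB of bounce space, and any request larger
         * than a page is additionally aligned to a page-sized stride.
         */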
        /*
         * Find suitable number of IO TLB entries size that will fit this request and
         * allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                wrap = index = ALIGN(io_tlb_index, stride);
                if (index >= io_tlb_nslabs)
                        wrap = index = 0;
                do {
                        /*
                         * If we find a slot that indicates we have 'nslots' number of
                         * contiguous buffers, we allocate the buffers from that slot and
                         * mark the entries as '0' indicating unavailable.
                         */
                        if (io_tlb_list[index] >= nslots) {
                                int count = 0;

                                for (i = index; i < (int) (index + nslots); i++)
                                        io_tlb_list[i] = 0;
                                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1)
                                     && io_tlb_list[i]; i--)
                                        io_tlb_list[i] = ++count;
                                dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                                /*
                                 * Update the indices to avoid searching in the next round.
                                 */
                                io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                                ? (index + nslots) : 0);
                                goto found;
                        }
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                } while (index != wrap);

                spin_unlock_irqrestore(&io_tlb_lock, flags);
                return NULL;
        }
  found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.  This is
         * needed when we sync the memory.  Then we sync the buffer if needed.
         */
        io_tlb_orig_addr[index] = buffer;
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                memcpy(dma_addr, buffer, size);

        return dma_addr;
}
/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry.
         */
        if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                /*
                 * bounce... copy the data back into the original buffer and
                 * delete the bounce buffer.
                 */
                memcpy(buffer, dma_addr, size);

        /*
         * Return the buffer to the free list by setting the corresponding entries to
         * indicate the number of contiguous entries available.  While returning the
         * entries to the free list, we merge the entries with slots below and above
         * the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                int count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                             io_tlb_list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the slots with
                 * the free slots that follow them.
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots, if
                 * available (non-zero).
                 */
                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
                     io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}
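/*
 * Worked example of the merge (illustrative numbers, all within one
 * IO_TLB_SEGSIZE segment): suppose slots 10..12 are being freed and slots
 * 13..14 are already free (io_tlb_list[13] == 2).  Step 1 walks downward
 * writing 3, 4, 5 into slots 12, 11, 10; if slot 9 is also free, Step 2
 * bumps it to 6, so every entry again counts the free run starting at it.
 */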
static void
sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];

        /*
         * bounce... copy the data back into/from the original buffer.
         * XXX How do you handle DMA_BIDIRECTIONAL here?
         */
        if (dir == DMA_FROM_DEVICE)
                memcpy(buffer, dma_addr, size);
        else if (dir == DMA_TO_DEVICE)
                memcpy(dma_addr, buffer, size);
}
void *
swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
{
        unsigned long dev_addr;
        void *ret;

        /* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
        flags |= GFP_DMA;

        ret = (void *)__get_free_pages(flags, get_order(size));
        if (!ret)
                return NULL;

        memset(ret, 0, size);
        dev_addr = virt_to_phys(ret);
        if (address_needs_mapping(hwdev, dev_addr))
                panic("swiotlb_alloc_coherent: allocated memory is out of range for device");
        *dma_handle = dev_addr;
        return ret;
}
void
swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}
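/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * a descriptor ring shared between CPU and device is allocated coherently
 * and freed with the matching call.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = swiotlb_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (ring) {
 *		... program ring_dma into the device, use ring from the CPU ...
 *		swiotlb_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 *	}
 */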
static void
swiotlb_full (struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation.  This is very bad.
         * Unfortunately the drivers cannot handle this operation properly
         * unless they check for pci_dma_mapping_error (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR
               "PCI-DMA: Out of SW-IOMMU space for %lu bytes at device %s\n",
               size, dev ? dev->bus_id : "?");

        if (size > io_tlb_overflow && do_panic) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory would be DMAed\n");
        }
}
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * PCI address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
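/*
 * Typical call sequence (hypothetical driver code, normally reached through
 * the generic dma_map_single()/dma_unmap_single() wrappers; dev, buf and len
 * are placeholders):
 *
 *	dma_addr_t busaddr = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(busaddr))
 *		... back off, the bounce pool is exhausted ...
 *	... hand busaddr to the device and wait for completion ...
 *	swiotlb_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */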
dma_addr_t
swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir)
{
        unsigned long dev_addr = virt_to_phys(ptr);
        void *map;

        if (dir == DMA_NONE)
                BUG();
        /*
         * Check if the PCI device can DMA to ptr... if so, just return ptr.
         */
        if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
                /*
                 * Device is capable of DMA'ing to the buffer... just return the
                 * PCI address of ptr.
                 */
                return dev_addr;

        /*
         * get a bounce buffer:
         */
        map = map_single(hwdev, ptr, size, dir);
        if (!map) {
                swiotlb_full(hwdev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = virt_to_phys(map);

        /*
         * Ensure that the address returned is DMA'ble:
         */
        if (address_needs_mapping(hwdev, dev_addr))
                panic("map_single: bounce buffer is not DMA'ble");

        return dev_addr;
}
/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
{
        char *dma_addr = phys_to_virt(dev_addr);

        if (dir == DMA_NONE)
                BUG();
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                unmap_single(hwdev, dma_addr, size, dir);
        else if (dir == DMA_FROM_DEVICE)
                mark_clean(dma_addr, size);
}
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
 * call this function before doing so.  At the next point you give the PCI dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
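/*
 * Sketch of the ownership ping-pong described above (hypothetical driver
 * code; dev, buf and len are placeholders):
 *
 *	dma_addr_t busaddr = swiotlb_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device DMAs into the buffer ...
 *	swiotlb_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU may now examine buf ...
 *	swiotlb_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 *	swiotlb_unmap_single(dev, busaddr, len, DMA_FROM_DEVICE);
 */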
void
swiotlb_sync_single_for_cpu (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
{
        char *dma_addr = phys_to_virt(dev_addr);

        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                sync_single(hwdev, dma_addr, size, dir);
        else if (dir == DMA_FROM_DEVICE)
                mark_clean(dma_addr, size);
}
void
swiotlb_sync_single_for_device (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
{
        char *dma_addr = phys_to_virt(dev_addr);

        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                sync_single(hwdev, dma_addr, size, dir);
        else if (dir == DMA_FROM_DEVICE)
                mark_clean(dma_addr, size);
}
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
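/*
 * Sketch of how a driver consumes the result (hypothetical code; dev, sglist,
 * nents and setup_descriptor() are placeholders):
 *
 *	int i, count = swiotlb_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		... mapping failed, back off ...
 *	for (i = 0; i < count; i++)
 *		setup_descriptor(sg_dma_address(&sglist[i]),
 *				 sg_dma_len(&sglist[i]));
 *	...
 *	swiotlb_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */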
int
swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
{
        void *addr;
        unsigned long dev_addr;
        int i;

        if (dir == DMA_NONE)
                BUG();

        for (i = 0; i < nelems; i++, sg++) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                dev_addr = virt_to_phys(addr);
                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
                        sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
                        if (!sg->dma_address) {
                                /* Don't panic here, we expect pci_map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg(hwdev, sg - i, i, dir);
                                sg[0].dma_length = 0;
                                return 0;
                        }
                } else {
                        sg->dma_address = dev_addr;
                        sg->dma_length = sg->length;
                }
        }
        return nelems;
}
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
                else if (dir == DMA_FROM_DEVICE)
                        mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
/*
 * Make physical memory consistent for a set of streaming mode DMA
 * translations after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
void
swiotlb_sync_sg_for_cpu (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev, (void *) phys_to_virt(sg->dma_address),
                                    sg->dma_length, dir);
}
void
swiotlb_sync_sg_for_device (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev, (void *) phys_to_virt(sg->dma_address),
                                    sg->dma_length, dir);
}
int
swiotlb_dma_mapping_error (dma_addr_t dma_addr)
{
        return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
}
/*
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported (struct device *hwdev, u64 mask)
{
        return 1;
}
EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);