 * Dynamic DMA mapping support.
 * This implementation is for IA-64 platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/io.h>
#define OFFSET(val,align) ((unsigned long)	\
	( (val) & ( (align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
#define IO_TLB_SEGSIZE	128
 * log of the size of each IO TLB slab.  The number of slabs is command line controllable.
#define IO_TLB_SHIFT 11
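/*
 * With the values above, each IO TLB slab is 1 << 11 = 2KB, the default pool of
 * 1024 slabs is 2MB, and the largest mapping that can be bounced in one piece is
 * IO_TLB_SEGSIZE slabs, i.e. 128 * 2KB = 256KB.
 */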
 * Used to do a quick range check in swiotlb_unmap_single and swiotlb_sync_single_*, to see
 * if the memory was in fact allocated by this API.
static char *io_tlb_start, *io_tlb_end;
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and io_tlb_end.
 * This is command line adjustable via setup_io_tlb_npages.
static unsigned long io_tlb_nslabs = 1024;
 * This is a free list describing the number of free entries available from each index
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
 * We need to save away the original address corresponding to a mapped entry for the sync
 * operations.
static unsigned char **io_tlb_orig_addr;
 * Protect the above data structures in the map and unmap calls
static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;
static int __init
setup_io_tlb_npages (char *str)
{
	io_tlb_nslabs = simple_strtoul(str, NULL, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);

	/* avoid tail segment of size < IO_TLB_SEGSIZE */
	io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
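/*
 * Worked example (the page size is configurable on ia64; assuming 16KB pages,
 * PAGE_SHIFT == 14): booting with "swiotlb=128" asks for 128 pages, which the
 * handler above converts to 128 << (14 - 11) = 1024 slabs, i.e. the 2MB default
 * pool.
 */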
 * Statically reserve bounce buffer space and initialize bounce buffer data structures for
 * the software IO TLB used to implement the PCI DMA API.
	 * Get IO TLB memory from the low pages
	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
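	/*
	 * The loop above leaves io_tlb_list looking like 128, 127, ..., 2, 1,
	 * repeating every IO_TLB_SEGSIZE entries: each entry records how many
	 * free slabs remain before the next segment boundary.
	 */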
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

	printk(KERN_INFO "Placing software IO TLB between 0x%p - 0x%p\n",
	       (void *) io_tlb_start, (void *) io_tlb_end);
 * Allocates bounce buffer and returns its kernel virtual address.
map_single (struct device *hwdev, char *buffer, size_t size, int dir)
	unsigned int nslots, stride, index, wrap;

	 * For mappings greater than a page size, we limit the stride (and hence alignment)
	 * to a page size.
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > (1 << PAGE_SHIFT))
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
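	/*
	 * Example (again assuming 16KB pages, PAGE_SHIFT == 14): a request larger
	 * than one page uses a stride of 1 << (14 - 11) = 8 slabs, and the search
	 * below starts from a stride-aligned slot, which keeps multi-page bounce
	 * buffers page aligned.
	 */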
	 * Find a suitable number of IO TLB entries that will fit this request and
	 * allocate a buffer from that IO TLB pool.
	spin_lock_irqsave(&io_tlb_lock, flags);
	wrap = index = ALIGN(io_tlb_index, stride);
	do {
		if (index >= io_tlb_nslabs)
			index = 0;

		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot and
		 * mark the entries as '0' indicating unavailable.
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1)
			     && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
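			/*
			 * Renumber the free slots just below the allocated range: each
			 * such entry again records how many contiguous free slabs are
			 * available starting at that slot, stopping at an IO_TLB_SEGSIZE
			 * boundary or at an already-allocated (zero) entry.
			 */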
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			 * Update the indices to avoid searching in the next round.
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);
	 * XXX What is a suitable recovery mechanism here?  We cannot
	 * sleep because we are called from within interrupts!
	panic("map_single: could not allocate software IO TLB (%ld bytes)", size);
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	 * Save away the mapping from the original address to the DMA address.  This is
	 * needed when we sync the memory.  Then we sync the buffer if needed.
	io_tlb_orig_addr[index] = buffer;
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
	int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];
	 * First, sync the memory before unmapping the entry
	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		 * bounce... copy the data back into the original buffer and delete the
		 * bounce buffer.
		memcpy(buffer, dma_addr, size);
	 * Return the buffer to the free list by setting the corresponding entries to
	 * indicate the number of contiguous entries available.  While returning the
	 * entries to the free list, we merge the entries with slots below and above the
	 * pool being returned.
	spin_lock_irqsave(&io_tlb_lock, flags);
	int count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
		     io_tlb_list[index + nslots] : 0);
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots.
	for (i = index + nslots - 1; i >= index; i--)
		io_tlb_list[i] = ++count;
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non zero)
	for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
	     io_tlb_list[i]; i--)
		io_tlb_list[i] = ++count;
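	/*
	 * Worked example: freeing nslots == 4 slabs whose successor entry holds 3
	 * (three free slabs follow the freed range within the same segment) sets
	 * the freed entries, from highest index to lowest, to 4, 5, 6, 7; Step 2
	 * then continues counting upward (8, 9, ...) through any free slabs just
	 * below, until a segment boundary or an allocated slot is reached.
	 */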
	spin_unlock_irqrestore(&io_tlb_lock, flags);
sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];
	 * bounce... copy the data back into/from the original buffer
	 * XXX How do you handle DMA_BIDIRECTIONAL here ?
	if (dir == DMA_FROM_DEVICE)
		memcpy(buffer, dma_addr, size);
	else if (dir == DMA_TO_DEVICE)
		memcpy(dma_addr, buffer, size);
swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
	void *ret;
	unsigned long dev_addr;

	/* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
	ret = (void *)__get_free_pages(flags, get_order(size));

	memset(ret, 0, size);
	dev_addr = virt_to_phys(ret);
	if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
		panic("swiotlb_alloc_coherent: allocated memory is out of range for device");
	*dma_handle = dev_addr;
	return ret;
swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
	free_pages((unsigned long) vaddr, get_order(size));
 * Map a single buffer of the indicated size for DMA in streaming mode.  The PCI address
 * to use is returned.
 * Once the device is given the dma address, the device owns this memory until either
 * swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir)
	unsigned long dev_addr = virt_to_phys(ptr);
	 * Check if the PCI device can DMA to ptr... if so, just return ptr
	if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) == 0)
		 * Device is capable of DMA'ing to the buffer... just return the PCI
		 * address of ptr
		return dev_addr;
	 * get a bounce buffer:
	dev_addr = virt_to_phys(map_single(hwdev, ptr, size, dir));
	 * Ensure that the address returned is DMA'ble:
	if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
		panic("map_single: bounce buffer is not DMA'ble");
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
 * flush them when they get mapped into an executable vm-area.
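 * Only pages that are fully covered by the DMA'd range get marked: the start is
 * rounded up to a page boundary and the loop below stops before a partially
 * covered tail page, so partial pages keep getting flushed the normal way.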
mark_clean (void *addr, size_t size)
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must match what
 * was provided for in a previous swiotlb_map_single call.  All other usages are
 * undefined.
 * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
 * device wrote there.
swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
	char *dma_addr = phys_to_virt(dev_addr);

	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		mark_clean(dma_addr, size);
 * Make physical memory consistent for a single streaming mode DMA translation after a
 * transfer.
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer using the cpu,
 * yet do not wish to teardown the PCI dma mapping, you must call this function before
 * doing so.  At the next point you give the PCI dma address back to the card, you must
 * first perform a swiotlb_dma_sync_for_device, and then the device again owns the buffer.
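 * In this implementation both swiotlb_sync_single_for_cpu and
 * swiotlb_sync_single_for_device funnel into sync_single(): for addresses inside
 * the bounce pool it copies bounce buffer to original buffer on DMA_FROM_DEVICE
 * and original buffer to bounce buffer on DMA_TO_DEVICE; addresses outside the
 * pool only get mark_clean() on the DMA_FROM_DEVICE path.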
swiotlb_sync_single_for_cpu (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
	char *dma_addr = phys_to_virt(dev_addr);

	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		mark_clean(dma_addr, size);
swiotlb_sync_single_for_device (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
	char *dma_addr = phys_to_virt(dev_addr);

	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		mark_clean(dma_addr, size);
 * Map a set of buffers described by scatterlist in streaming mode for DMA.  This is the
 * scatter-gather version of the above swiotlb_map_single interface.  Here the scatter
 * gather list elements are each tagged with the appropriate dma address and length.  They
 * are obtained via sg_dma_{address,length}(SG).
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
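 * In this implementation each scatterlist entry is checked against the device's
 * dma_mask separately: entries the device can already reach are passed through
 * with their physical address, while the others get their own bounce buffer via
 * map_single().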
swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
	void *addr;
	unsigned long dev_addr;
	int i;

	for (i = 0; i < nelems; i++, sg++) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		dev_addr = virt_to_phys(addr);
		if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
			sg->dma_address = (dma_addr_t) map_single(hwdev, addr, sg->length, dir);
		else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules concerning calls
 * here are the same as for swiotlb_unmap_single() above.
swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
	int i;

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 * Make physical memory consistent for a set of streaming mode DMA translations after a
 * transfer.
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules and
 * usage.
swiotlb_sync_sg_for_cpu (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
	int i;

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
swiotlb_sync_sg_for_device (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
	int i;

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
swiotlb_dma_mapping_error (dma_addr_t dma_addr)
 * Return whether the given PCI device DMA address mask can be supported properly.  For
 * example, if your device can only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
swiotlb_dma_supported (struct device *hwdev, u64 mask)
EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);
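
/*
 * Usage sketch (illustrative only): on an IA-64 machine without an I/O TLB, a
 * driver reaches the routines exported above through the generic DMA API.  The
 * device pointer "dev", buffer "buf" and length "len" below are hypothetical.
 *
 *	dma_addr_t bus_addr;
 *
 *	bus_addr = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(bus_addr))
 *		return -EIO;
 *	... hand bus_addr to the device and start the transfer ...
 *	swiotlb_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
 */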