/*
 * arch/ppc64/kernel/iommu.c
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/bitops.h>

/* Debug output is compiled out by default */
#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);

static unsigned long iommu_range_alloc(struct iommu_table *tbl, unsigned long npages,
                                       unsigned long *handle)
{
        unsigned long n, end, i, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Small allocs (15 pages or less) stay below it_halfpoint; the rest
         * of the table is reserved for large allocations.
         */
        limit = largealloc ? tbl->it_mapsize : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:
        n = find_next_zero_bit(tbl->it_map, limit, start);
        end = n + npages;

        if (unlikely(end >= limit)) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_mapsize : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        /* Restart the search if any bit in the candidate range is set */
        for (i = n; i < end; i++)
                if (test_bit(i, tbl->it_map)) {
                        start = i + 1;
                        goto again;
                }

        for (i = n; i < end; i++)
                __set_bit(i, tbl->it_map);

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                               ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

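/* Worked example of the split above (numbers purely illustrative): with
 * it_mapsize = 8192 entries and it_halfpoint = 6144, an allocation of 15
 * pages or less searches entries [0, 6144) starting at it_hint, while a
 * larger allocation searches [6144, 8192) starting at it_largehint.  On a
 * failed pass the search falls back to the other region before the
 * allocation finally fails with DMA_ERROR_CODE.
 */
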
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
                              unsigned int npages,
                              enum dma_data_direction direction)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(tbl, npages, NULL);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << PAGE_SHIFT;      /* Set the return dma address */

        /* Put the TCEs in the HW table */
        ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
                         direction);

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long i;

        entry = dma_addr >> PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_mapsize) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tmapsize   = 0x%lx\n", (u64)tbl->it_mapsize);
                        printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);

        for (i = 0; i < npages; i++)
                __clear_bit(free_entry + i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
                 enum dma_data_direction direction)
{
        dma_addr_t dma_next, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount;
        unsigned long handle;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);

        for (s = outs; nelems; nelems--, s++) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long)page_address(s->page) + s->offset;
                npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
                npages >>= PAGE_SHIFT;
                entry = iommu_range_alloc(tbl, npages, &handle);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
                                       " npages %lx\n", tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << PAGE_SHIFT;
                dma_addr |= s->offset;

                DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
                    npages, entry, dma_addr);

                /* Insert into HW table */
                ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - the allocated dma_addr isn't contiguous to the
                         *   previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs++;
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %lx\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < nelems) {
                outs++;
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }
        return outcount;

 failure:
        for (s = &sglist[0]; s <= outs; s++) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & PAGE_MASK;
                        npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
                                 >> PAGE_SHIFT;
                        __iommu_free(tbl, vaddr, npages);
                }
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                    int nelems, enum dma_data_direction direction)
{
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sglist->dma_address;

                if (sglist->dma_length == 0)
                        break;
                npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
                          - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
                __iommu_free(tbl, dma_handle, npages);
                sglist++;
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

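/* Illustrative sketch only (not part of this file): a hypothetical driver
 * with an already-initialized scatterlist "sg" of "nents" entries and the
 * device's iommu_table "tbl" might use the SG interface roughly as:
 *
 *      int mapped = iommu_map_sg(&pdev->dev, tbl, sg, nents, DMA_TO_DEVICE);
 *      if (mapped == 0)
 *              return -ENOMEM;
 *      ...program the first "mapped" dma_address/dma_length pairs...
 *      iommu_unmap_sg(tbl, sg, nents, DMA_TO_DEVICE);
 */
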
/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl)
{
        unsigned long sz;
        static int welcomed = 0;

        /* it_size is in pages, it_mapsize in number of entries */
        tbl->it_mapsize = (tbl->it_size << PAGE_SHIFT) / tbl->it_entrysize;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_mapsize * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_mapsize + 7) >> 3;

        tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
        if (!tbl->it_map)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);

        memset(tbl->it_map, 0, sz);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

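/* Sizing example (illustrative values, not from any particular platform):
 * a TCE table of it_size = 16 pages with 4K pages and it_entrysize = 8
 * bytes yields it_mapsize = (16 << 12) / 8 = 8192 entries, an it_halfpoint
 * of 6144, and a bitmap of (8192 + 7) >> 3 = 1024 bytes.
 */
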
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
                            size_t size, enum dma_data_direction direction)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        unsigned long uaddr;
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        uaddr = (unsigned long)vaddr;
        npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
        npages >>= PAGE_SHIFT;

        if (tbl) {
                dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit()) {
                                printk(KERN_INFO "iommu_alloc failed, "
                                       "tbl %p vaddr %p npages %d\n",
                                       tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~PAGE_MASK);
        }

        return dma_handle;
}

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (tbl)
                iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
                           (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
}

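/* Illustrative sketch only: a hypothetical caller mapping a kmalloc'ed
 * buffer "buf" of "len" bytes through the device's iommu_table "tbl"
 * (names are examples, not part of this file):
 *
 *      dma_addr_t busaddr = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);
 *      if (busaddr == DMA_ERROR_CODE)
 *              return -ENOMEM;
 *      ...hand busaddr to the device...
 *      iommu_unmap_single(tbl, busaddr, len, DMA_TO_DEVICE);
 */
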
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
                             dma_addr_t *dma_handle)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int npages, order;

        size = PAGE_ALIGN(size);
        npages = size >> PAGE_SHIFT;
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
                return (void *)DMA_ERROR_CODE;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        ret = (void *)__get_free_pages(GFP_ATOMIC, order);
        if (!ret)
                return NULL;
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                ret = NULL;
        } else
                *dma_handle = mapping;

        return ret;
}

void iommu_free_consistent(struct iommu_table *tbl, size_t size,
                           void *vaddr, dma_addr_t dma_handle)
{
        unsigned int npages;

        if (tbl) {
                size = PAGE_ALIGN(size);
                npages = size >> PAGE_SHIFT;
                iommu_free(tbl, dma_handle, npages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}

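/* Illustrative sketch only: a hypothetical driver allocating a descriptor
 * ring shared with its device ("RING_BYTES" and the variable names are
 * examples, not part of this file):
 *
 *      dma_addr_t ring_bus;
 *      void *ring = iommu_alloc_consistent(tbl, RING_BYTES, &ring_bus);
 *      if (!ring || ring == (void *)DMA_ERROR_CODE)
 *              return -ENOMEM;
 *      ...CPU uses "ring", the device uses "ring_bus"...
 *      iommu_free_consistent(tbl, RING_BYTES, ring, ring_bus);
 */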