/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#ifdef CONFIG_PREEMPT
#define preempt_atomic() in_atomic()
#else
#define preempt_atomic() 1
#endif
dma_addr_t bad_dma_address;

unsigned long iommu_bus_base;     /* GART remapping area (physical) */
static unsigned long iommu_size;  /* size of remapping area in bytes */
static unsigned long iommu_pages; /* .. and in pages */

u32 *iommu_gatt_base;             /* Remapping table */
int no_iommu;
static int no_agp;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow = 1;
int force_iommu = 1;
#else
int panic_on_overflow = 0;
int force_iommu = 0;
#endif
int iommu_merge = 0;
int iommu_sac_force = 0;
/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it true the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;
#define MAX_NB 8

/* Allocation bitmap for the remapping area */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
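/*
 * Worked example (illustrative, address made up): for the page at
 * physical 0x1234567000 (above 4GB):
 *   GPTE_ENCODE: (0x1234567000 & 0xfffff000)   = 0x34567000
 *                ((0x1234567000 >> 32) << 4)   = 0x120
 *                | GPTE_VALID | GPTE_COHERENT  -> 0x34567123
 *   GPTE_DECODE: (0x34567123 & 0xfffff000)     = 0x34567000
 *                ((0x34567123 & 0xff0) << 28)  = 0x1200000000
 *                                              -> 0x1234567000
 * i.e. bits 39:32 of the physical address live in GPTE bits 11:4.
 */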
#define to_pages(addr,size) \
        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
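/*
 * Worked example (illustrative): with 4K pages, a 0x2000 byte buffer
 * starting at offset 0xffc into a page needs
 *   round_up(0xffc + 0x2000, 0x1000) >> 12 = 0x3000 >> 12 = 3
 * GART pages, one more than an aligned buffer of the same size.
 */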
#define for_all_nb(dev) \
        dev = NULL; \
        while ((dev = pci_find_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
                if (dev->bus->number == 0 && \
                    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];

#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
static int need_flush;         /* global flush state. set for each gart wrap */
static dma_addr_t pci_map_area(struct pci_dev *dev, unsigned long phys_mem,
                               size_t size, int dir);
/* Allocate a range of IOMMU pages from the bitmap. Wraps around and
   requests a GART flush when the allocation pointer wraps, because old
   mappings may still be cached by the northbridge. */
static unsigned long alloc_iommu(int size)
{
        unsigned long offset, flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = find_next_zero_string(iommu_gart_bitmap, next_bit, iommu_pages, size);
        if (offset == -1) {
                need_flush = 1;
                offset = find_next_zero_string(iommu_gart_bitmap, 0, next_bit, size);
        }
        if (offset != -1) {
                set_bit_string(iommu_gart_bitmap, offset, size);
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = 1;
                }
        }
        if (iommu_fullflush)
                need_flush = 1;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
        return offset;
}
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;
        if (size == 1) {
                clear_bit(offset, iommu_gart_bitmap);
                return;
        }
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        __clear_bit_string(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(struct pci_dev *dev)
{
        unsigned long flags;
        int flushed = 0;
        int i;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                for (i = 0; i < MAX_NB; i++) {
                        u32 w;
                        if (!northbridges[i])
                                continue;
                        pci_write_config_dword(northbridges[i], 0x9c,
                                               northbridge_flush_word[i] | 1);
                        /* Make sure the hardware actually executed the flush. */
                        do {
                                pci_read_config_dword(northbridges[i], 0x9c, &w);
                        } while (w & 1);
                        flushed++;
                }
                if (!flushed)
                        printk("nothing to flush?\n");
                need_flush = 0;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Allocate memory for a consistent mapping.
 */
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                           dma_addr_t *dma_handle)
{
        void *memory;
        int gfp = preempt_atomic() ? GFP_ATOMIC : GFP_KERNEL;
        unsigned long dma_mask = 0;
        u64 bus;

        if (hwdev)
                dma_mask = hwdev->dev.coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = 0xffffffff;

        /* Kludge to make it bug-to-bug compatible with i386. i386
           uses the normal dma_mask for alloc_consistent. */
        if (hwdev)
                dma_mask &= hwdev->dma_mask;

again:
        memory = (void *)__get_free_pages(gfp, get_order(size));
        if (memory == NULL)
                return NULL;

        {
                int high, mmu;
                bus = virt_to_bus(memory);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                if (no_iommu || dma_mask < 0xffffffffUL) {
                        if (high) {
                                if (!(gfp & GFP_DMA)) {
                                        gfp |= GFP_DMA;
                                        goto again;
                                }
                                goto free;
                        }
                        mmu = 0;
                }
                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = virt_to_bus(memory);
                        return memory;
                }
        }

        *dma_handle = pci_map_area(hwdev, bus, size, PCI_DMA_BIDIRECTIONAL);
        if (*dma_handle == bad_dma_address)
                goto error;
        flush_gart(hwdev);
        return memory;

error:
        if (panic_on_overflow)
                panic("pci_alloc_consistent: overflow %lu bytes\n", size);
free:
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
/*
 * Unmap consistent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                         void *vaddr, dma_addr_t bus)
{
        pci_unmap_single(hwdev, bus, size, 0);
        free_pages((unsigned long)vaddr, get_order(size));
}
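/*
 * Illustrative use from a driver (a sketch with made-up names, not part
 * of this file): allocate a descriptor ring shared by CPU and device,
 * hand the bus address to the hardware, free on teardown. ring_desc,
 * RING_BYTES, RING_BASE_REG and ioaddr are hypothetical.
 *
 *      struct ring_desc *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
 *      if (!ring)
 *              return -ENOMEM;
 *      writel((u32)ring_dma, ioaddr + RING_BASE_REG);
 *      ...
 *      pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);
 */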
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
        iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
        iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;
extern unsigned long printk_address(unsigned long);
static void dump_leak(void)
{
        int i;
        static int dump;
        if (dump || !iommu_leak_tab) return;
        dump = 1;
        show_stack(NULL,NULL);
        /* Very crude. dump some from the end of the table too */
        printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i+=2) {
                printk("%lu: ", iommu_pages-i);
                printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
                printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
static void iommu_full(struct pci_dev *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this failure properly.
         * Return some non-mapped, prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        printk(KERN_ERR
               "PCI-DMA: Out of IOMMU space for %lu bytes at device %s[%s]\n",
               size, dev ? pci_pretty_name(dev) : "", dev ? dev->slot_name : "?");

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory will be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory will be DMAed\n");
        }

#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}
static inline int need_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
        u64 mask = dev ? dev->dma_mask : 0xffffffff;
        int high = addr + size >= mask;
        int mmu = high;
        if (force_iommu)
                mmu = 1;
        if (no_iommu) {
                if (high)
                        panic("PCI-DMA: high address but no IOMMU.\n");
                mmu = 0;
        }
        return mmu;
}

static inline int nonforced_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
        u64 mask = dev ? dev->dma_mask : 0xffffffff;
        int high = addr + size >= mask;
        int mmu = high;
        if (no_iommu) {
                if (high)
                        panic("PCI-DMA: high address but no IOMMU.\n");
                mmu = 0;
        }
        return mmu;
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t pci_map_area(struct pci_dev *dev, unsigned long phys_mem,
                               size_t size, int dir)
{
        unsigned long npages = to_pages(phys_mem, size);
        unsigned long iommu_page = alloc_iommu(npages);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("pci_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
/* Map a single area into the IOMMU */
dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size, int dir)
{
        unsigned long phys_mem, bus;

        BUG_ON(dir == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
        if (swiotlb)
                return swiotlb_map_single(&dev->dev,addr,size,dir);
#endif

        phys_mem = virt_to_phys(addr);
        if (!need_iommu(dev, phys_mem, size))
                return phys_mem;

        bus = pci_map_area(dev, phys_mem, size, dir);
        flush_gart(dev);
        return bus;
}
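/*
 * Illustrative streaming use (sketch, made-up buffer and names): map a
 * buffer for one transfer, check for overflow against bad_dma_address,
 * and unmap after the device has finished with it.
 *
 *      dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *      if (bus == bad_dma_address)
 *              goto drop;              // IOMMU overflow, see iommu_full()
 *      ... start the transfer and wait for its completion interrupt ...
 *      pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 */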
/* Fallback for pci_map_sg in case of overflow */
static int pci_map_sg_nonforce(struct pci_dev *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "pci_map_sg overflow\n");
#endif

        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                unsigned long addr = page_to_phys(s->page) + s->offset;
                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = pci_map_area(dev, addr, s->length, dir);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        pci_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart(dev);
        return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
                          struct scatterlist *sout, unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(pages);
        unsigned long iommu_page = iommu_start;
        int i;

        if (iommu_start == -1)
                return -1;

        for (i = start; i < stopat; i++) {
                struct scatterlist *s = &sg[i];
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(i > start && s->offset);
                if (i == start) {
                        *sout = *s;
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = to_pages(s->offset, s->length);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);
        return 0;
}
static inline int pci_map_cont(struct scatterlist *sg, int start, int stopat,
                               struct scatterlist *sout,
                               unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(stopat - start != 1);
                *sout = sg[start];
                sout->dma_length = sg[start].length;
                return 0;
        }
        return __pci_map_cont(sg, start, stopat, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
int pci_map_sg(struct pci_dev *dev, struct scatterlist *sg, int nents, int dir)
{
        int i;
        int out;
        int start;
        unsigned long pages = 0;
        int need = 0, nextneed;

#ifdef CONFIG_SWIOTLB
        if (swiotlb)
                return swiotlb_map_sg(&dev->dev,sg,nents,dir);
#endif

        BUG_ON(dir == PCI_DMA_NONE);
        if (nents == 0)
                return 0;

        out = 0;
        start = 0;
        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                dma_addr_t addr = page_to_phys(s->page) + s->offset;
                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        struct scatterlist *ps = &sg[i-1];
                        /* Can only merge when the last chunk ends on a page
                           boundary and the new one doesn't have an offset. */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (pci_map_cont(sg, start, i, sg+out, pages,
                                                 need) < 0)
                                        goto error;
                                out++;
                                pages = 0;
                                start = i;
                        }
                }

                need = nextneed;
                pages += to_pages(s->offset, s->length);
        }
        if (pci_map_cont(sg, start, i, sg+out, pages, need) < 0)
                goto error;
        out++;
        flush_gart(dev);
        if (out < nents)
                sg[out].dma_length = 0;
        return out;

error:
        flush_gart(NULL);
        pci_unmap_sg(dev, sg, nents, dir);
        /* When it was forced try again unforced */
        if (force_iommu)
                return pci_map_sg_nonforce(dev, sg, nents, dir);
        if (panic_on_overflow)
                panic("pci_map_sg: overflow on %lu pages\n", pages);
        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for (i = 0; i < nents; i++)
                sg[i].dma_address = bad_dma_address;
        return 0;
}
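/*
 * Illustrative scatterlist use (sketch; fill_hw_desc() is a made-up
 * helper): fewer entries than nents may come back when merging is
 * active, so the driver iterates over the returned count, but unmaps
 * with the original nents.
 *
 *      int i, n = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 *      for (i = 0; i < n; i++)
 *              fill_hw_desc(i, sg[i].dma_address, sg[i].dma_length);
 *      ...
 *      pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 */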
/*
 * Free a PCI mapping.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
                      size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

#ifdef CONFIG_SWIOTLB
        if (swiotlb) {
                swiotlb_unmap_single(&hwdev->dev,dma_addr,size,direction);
                return;
        }
#endif

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;
        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = to_pages(dma_addr, size);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void pci_unmap_sg(struct pci_dev *dev, struct scatterlist *sg, int nents,
                  int dir)
{
        int i;
        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                if (!s->dma_length || !s->length)
                        break;
                pci_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}
int pci_dma_supported(struct pci_dev *dev, u64 mask)
{
        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_consistent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < 0x00ffffff)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.
           This allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address mode
           that is as efficient as 32bit (aic79xx). Don't force SAC for these.
           Assume all masks <= 40 bits are of this type. Normally this doesn't
           make any difference, but gives more gentle handling of IOMMU
           overflow. */
        if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->slot_name, mask);
                return 0;
        }

        return 1;
}
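/*
 * Illustrative mask negotiation (sketch): a driver probing for DAC ends
 * up here via pci_set_dma_mask(), and with iommu_sac_force set the
 * 64bit mask is refused so the driver falls back to 32bit SAC.
 *
 *      if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) == 0)
 *              use_dac = 1;
 *      else if (pci_set_dma_mask(pdev, 0xffffffffULL) == 0)
 *              use_dac = 0;
 *      else
 *              return -ENODEV;         // no usable DMA configuration
 */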
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);
EXPORT_SYMBOL(pci_dma_supported);
EXPORT_SYMBOL(no_iommu);
EXPORT_SYMBOL(force_iommu);
EXPORT_SYMBOL(bad_dma_address);
EXPORT_SYMBOL(iommu_merge);
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;
        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024)
                printk(KERN_WARNING
  "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n", iommu_size>>20);

        return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32;
        u64 aper_base;
        unsigned aper_order;

        pci_read_config_dword(dev, 0x94, &aper_base_32);
        pci_read_config_dword(dev, 0x90, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size >= 0xffffffff || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
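/*
 * Worked example (illustrative register values): an order of 3 in bits
 * 3:1 of config reg 0x90 gives 32MB << 3 = 256MB, and 0x0004 in reg
 * 0x94 gives a base of (0x0004 & 0x7fff) << 25 = 0x8000000, i.e. a
 * 256MB aperture at 128MB.
 */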
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        struct pci_dev *dev;
        void *gatt;
        unsigned aper_base, new_aper_base;
        unsigned aper_size, gatt_size, new_aper_size;

        aper_size = aper_base = info->aper_size = 0;
        for_all_nb(dev) {
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size>>20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;

        for_all_nb(dev) {
                u32 ctl;
                u32 gatt_reg;

                gatt_reg = __pa(gatt) >> 12;
                gatt_reg <<= 4;
                pci_write_config_dword(dev, 0x98, gatt_reg);
                pci_read_config_dword(dev, 0x90, &ctl);

                ctl |= 1;
                ctl &= ~((1<<4) | (1<<5));

                pci_write_config_dword(dev, 0x90, ctl);
        }
        flush_gart(NULL);

        printk("PCI-DMA: aperture base @ %x size %u KB\n", aper_base, aper_size>>10);
        return 0;

nommu:
        /* Should not happen anymore */
        printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
        return -1;
}
extern int agp_amd64_init(void);
static int __init pci_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long aper_size;
        unsigned long iommu_start;
        struct pci_dev *dev;
        unsigned long scratch;
        long i;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(&info) < 0);
#endif

        if (swiotlb) {
                no_iommu = 1;
                printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
                return -1;
        }

        if (no_iommu || (!force_iommu && end_pfn < 0xffffffff>>PAGE_SHIFT) ||
            !iommu_aperture) {
                printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
                no_iommu = 1;
                return -1;
        }

        if (no_agp) {
                int err = -1;
                printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
                if (force_iommu || end_pfn >= 0xffffffff>>PAGE_SHIFT)
                        err = init_k8_gatt(&info);
                if (err < 0) {
                        printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
                        no_iommu = 1;
                        return -1;
                }
        }

        aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
                                                    get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
        memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                                  get_order(iommu_pages*sizeof(void *)));
                if (iommu_leak_tab)
                        memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
                else
                        printk("PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size>>20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

        /*
         * Try to workaround a bug (thanks to BenH)
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then.
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        for_all_nb(dev) {
                u32 flag;
                int cpu = PCI_SLOT(dev->devfn) - 24;
                if (cpu >= MAX_NB)
                        continue;
                northbridges[cpu] = dev;
                pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
                northbridge_flush_word[cpu] = flag;
        }

        flush_gart(NULL);

        return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]]
   [,merge][,nomerge][,forcesac][,fullflush][,nofullflush][,panic][,nopanic]
   [,allowed][,soft]

   size         set size of the IOMMU remapping area (in bytes)
   noagp        don't initialize the AGP driver and use the full aperture.
   off          don't use the IOMMU
   leak         turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
   memaper[=order] allocate a private aperture over RAM with size 32MB<<order.
   noforce      don't force IOMMU usage. Default.
   force        force IOMMU usage (implies allowed)
   panic        always panic on IOMMU overflow
   nopanic      don't panic on IOMMU overflow
   merge        do SG merging. Implies force (experimental)
   nomerge      don't do SG merging.
   forcesac     force SAC mode for masks <40bits (experimental)
   fullflush    flush the IOMMU on each allocation (default)
   nofullflush  don't use IOMMU fullflush
   allowed      override iommu-off workarounds for specific chipsets.
   soft         use software bounce buffering (default for Intel machines)
*/
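/*
 * Example (illustrative): booting with "iommu=force,fullflush,memaper=2"
 * forces the IOMMU on even for devices below 4GB, flushes the GART on
 * every mapping, and allocates a private 32MB<<2 = 128MB aperture over RAM.
 */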
__init int iommu_setup(char *opt)
{
        int arg;
        char *p = opt;

        for (;;) {
                if (!memcmp(p,"noagp", 5))
                        no_agp = 1;
                if (!memcmp(p,"off", 3))
                        no_iommu = 1;
                if (!memcmp(p,"force", 5)) {
                        force_iommu = 1;
                        iommu_aperture_allowed = 1;
                }
                if (!memcmp(p,"allowed",7))
                        iommu_aperture_allowed = 1;
                if (!memcmp(p,"noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }
                if (!memcmp(p, "memaper", 7)) {
                        fallback_aper_force = 1;
                        p += 7;
                        if (*p == '=' && get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
                if (!memcmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!memcmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!memcmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!memcmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!memcmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!memcmp(p, "fullflush", 9))
                        iommu_fullflush = 1;
                if (!memcmp(p, "nofullflush", 11))
                        iommu_fullflush = 0;
                if (!memcmp(p, "soft", 4))
                        swiotlb = 1;
#ifdef CONFIG_IOMMU_LEAK
                if (!memcmp(p,"leak", 4)) {
                        leak_trace = 1;
                        p += 4;
                        if (isdigit(*p) && get_option(&p, &arg))
                                iommu_leak_pages = arg;
                }
#endif
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_size = arg;
                do {
                        if (*p == ' ' || *p == 0)
                                return 0;
                } while (*p++ != ',');
        }
        return 1;
}