/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#ifdef CONFIG_PREEMPT
#define preempt_atomic() in_atomic()
#else
#define preempt_atomic() 1
#endif
dma_addr_t bad_dma_address;

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

int no_iommu;
static int no_agp;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow = 1;
int force_iommu = 1;
#else
int panic_on_overflow = 0;
int force_iommu = 0;
#endif
int iommu_merge = 0;
int iommu_sac_force = 0;
/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it true the GART is
   flushed for every mapping. The problem is that the lazy flush seems to
   trigger bugs with some popular PCI cards, in particular 3ware (but it
   has also been seen with at least Qlogic). */
int iommu_fullflush = 1;
#define MAX_NB 8

/* Allocation bitmap for the remapping area */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
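/*
 * Worked example (illustrative): for the 33bit physical address
 * 0x123456000 GPTE_ENCODE keeps bits 31:12 in place and folds bits
 * 39:32 into bits 11:4 of the 32bit entry:
 *	GPTE_ENCODE(0x123456000ULL) == 0x23456000 | (1 << 4) | 3 == 0x23456013
 *	GPTE_DECODE(0x23456013)     == 0x23456000 | (1ULL << 32) == 0x123456000
 */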
#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
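/*
 * Example: a 2 byte buffer at page offset 0xfff straddles a page
 * boundary, so to_pages(0xfff, 2) rounds 0x1001 up to 0x2000 and
 * yields 2 pages.
 */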
#define for_all_nb(dev) \
	dev = NULL;	\
	while ((dev = pci_find_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
		if (dev->bus->number == 0 &&				\
		    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
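/*
 * Usage sketch: the macro resets dev itself, so a bare pointer suffices:
 *	struct pci_dev *dev;
 *	for_all_nb(dev)
 *		pci_read_config_dword(dev, 0x9c, &flush_word);
 * This visits the 0x1103 function of every K8 northbridge (slots 24-31
 * on bus 0).
 */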
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];

#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
static dma_addr_t pci_map_area(struct pci_dev *dev, unsigned long phys_mem,
			       size_t size, int dir);
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}
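/*
 * Example walk: with iommu_pages == 8 and next_bit == 6, a 4 page request
 * cannot fit in bits 6-7, so the search wraps to bit 0, need_flush is set
 * (the wrapped-over range may still be cached in the GART TLB) and pages
 * 0-3 are handed out, leaving next_bit == 4.
 */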
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	if (size == 1) {
		clear_bit(offset, iommu_gart_bitmap);
		return;
	}
	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(struct pci_dev *dev)
{
	unsigned long flags;
	int bus = dev ? dev->bus->number : -1;
	cpumask_const_t bus_cpumask = pcibus_to_cpumask(bus);
	int flushed = 0;
	int i;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		for (i = 0; i < MAX_NB; i++) {
			u32 w;
			if (!northbridges[i])
				continue;
			if (bus >= 0 && !(cpu_isset_const(i, bus_cpumask)))
				continue;
			pci_write_config_dword(northbridges[i], 0x9c,
					       northbridge_flush_word[i] | 1);
			/* Make sure the hardware actually executed the flush. */
			do {
				pci_read_config_dword(northbridges[i], 0x9c, &w);
			} while (w & 1);
			flushed++;
		}
		if (!flushed)
			printk("nothing to flush? %d\n", bus);
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Allocate memory for a consistent mapping.
 */
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			   dma_addr_t *dma_handle)
{
	void *memory;
	int gfp = preempt_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	unsigned long dma_mask = 0;
	unsigned long bus;

	if (hwdev)
		dma_mask = hwdev->dev.coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = 0xffffffff;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_consistent. */
	if (hwdev)
		dma_mask &= hwdev->dma_mask;

again:
	memory = (void *)__get_free_pages(gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;
		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		if (no_iommu || dma_mask < 0xffffffffUL) {
			if (high) {
				if (!(gfp & GFP_DMA)) {
					gfp |= GFP_DMA;
					goto again;
				}
				goto free;
			}
			mmu = 0;
		}
		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	*dma_handle = pci_map_area(hwdev, bus, size, PCI_DMA_BIDIRECTIONAL);
	if (*dma_handle == bad_dma_address)
		goto error;
	flush_gart(hwdev);
	return memory;

error:
	if (panic_on_overflow)
		panic("pci_alloc_consistent: overflow %lu bytes\n", size);
free:
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
/*
 * Unmap consistent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	pci_unmap_single(hwdev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
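/*
 * Usage sketch (hypothetical driver code):
 *	dma_addr_t dma;
 *	void *buf = pci_alloc_consistent(pdev, PAGE_SIZE, &dma);
 *	if (buf) {
 *		... hand dma to the device ...
 *		pci_free_consistent(pdev, PAGE_SIZE, buf, dma);
 *	}
 */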
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = 0;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab) return;
	dump = 1;
	show_stack(NULL,NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i+=2) {
		printk("%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
static void iommu_full(struct pci_dev *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s[%s]\n",
	       size, dev ? pci_pretty_name(dev) : "", dev ? dev->slot_name : "?");

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory will be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory will be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int need_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
	u64 mask = dev ? dev->dma_mask : 0xffffffff;
	int high = addr + size >= mask;
	int mmu = high;
	if (force_iommu)
		mmu = 1;
	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		mmu = 0;
	}
	return mmu;
}

static inline int nonforced_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
	u64 mask = dev ? dev->dma_mask : 0xffffffff;
	int high = addr + size >= mask;
	int mmu = high;
	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		mmu = 0;
	}
	return mmu;
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t pci_map_area(struct pci_dev *dev, unsigned long phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("pci_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
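/*
 * Example: mapping 16 bytes at physical 0x123456789 needs a single GART
 * page; if alloc_iommu() returns page N the device sees bus address
 * iommu_bus_base + N*PAGE_SIZE + 0x789. Note that phys_mem & ~PAGE_MASK
 * is unchanged by the += PAGE_SIZE in the loop above.
 */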
/* Map a single area into the IOMMU */
dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_map_single(&dev->dev,addr,size,dir);
#endif

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = pci_map_area(dev, phys_mem, size, dir);
	flush_gart(dev);
	return bus;
}
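/*
 * Usage sketch (hypothetical driver code):
 *	dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	... run the DMA and wait for completion ...
 *	pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
 */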
/* Fallback for pci_map_sg in case of overflow */
static int pci_map_sg_nonforce(struct pci_dev *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "pci_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++ ) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = pci_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					pci_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart(dev);
	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}
static inline int pci_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __pci_map_cont(sg, start, stopat, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
int pci_map_sg(struct pci_dev *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == PCI_DMA_NONE);
	if (nents == 0)
		return 0;

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_map_sg(&dev->dev,sg,nents,dir);
#endif

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (pci_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (pci_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart(dev);
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart(NULL);
	pci_unmap_sg(dev, sg, nents, dir);
	/* When it was forced try again unforced */
	if (force_iommu)
		return pci_map_sg_nonforce(dev, sg, nents, dir);
	if (panic_on_overflow)
		panic("pci_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}
/*
 * Free a PCI mapping.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

#ifdef CONFIG_SWIOTLB
	if (swiotlb) {
		swiotlb_unmap_single(&hwdev->dev,dma_addr,size,direction);
		return;
	}
#endif

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = 0;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void pci_unmap_sg(struct pci_dev *dev, struct scatterlist *sg, int nents,
		  int dir)
{
	int i;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		pci_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}
int pci_dma_supported(struct pci_dev *dev, u64 mask)
{
	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_consistent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < 0x00ffffff)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.
	   This allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address mode
	   that is as efficient as 32bit (aic79xx). Don't force SAC for these.
	   Assume all masks <= 40 bits are of this type. Normally this doesn't
	   make any difference, but gives more gentle handling of IOMMU overflow. */
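	/* Example: aic79xx advertises a 39bit mask (0x7fffffffffULL); that
	   is below the 40bit cutoff above, so the mask is accepted and the
	   driver keeps DAC addressing even with iommu=forcesac. */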
	if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
		       dev->slot_name, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);
EXPORT_SYMBOL(pci_dma_supported);
EXPORT_SYMBOL(no_iommu);
EXPORT_SYMBOL(force_iommu);
EXPORT_SYMBOL(bad_dma_address);
EXPORT_SYMBOL(iommu_merge);
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size>>20);

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
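/*
 * Example: an aperture order of 3 in the 0x90 control register means
 * (32MB << 3) == 256MB of aperture.
 */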
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;

	aper_size = aper_base = info->aper_size = 0;
	for_all_nb(dev) {
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;
		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for_all_nb(dev) {
		u32 ctl;
		u32 gatt_reg;

		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart(NULL);

	printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
	return 0;

nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
	return -1;
}
extern int agp_amd64_init(void);

static int __init pci_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	struct pci_dev *dev;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(&info) < 0);
#endif

	if (swiotlb) {
		no_iommu = 1;
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		return -1;
	}

	if (no_iommu || (!force_iommu && end_pfn < 0xffffffff>>PAGE_SHIFT) ||
	    !iommu_aperture) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		no_iommu = 1;
		return -1;
	}

	if (no_agp) {
		int err = -1;
		printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
		if (force_iommu || end_pfn >= 0xffffffff>>PAGE_SHIFT)
			err = init_k8_gatt(&info);
		if (err < 0) {
			printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
			no_iommu = 1;
			return -1;
		}
	}
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
						    get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif
	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	for_all_nb(dev) {
		u32 flag;
		int cpu = PCI_SLOT(dev->devfn) - 24;
		if (cpu >= MAX_NB)
			continue;
		northbridges[cpu] = dev;
		pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
		northbridge_flush_word[cpu] = flag;
	}

	flush_gart(NULL);

	return 0;
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
         [,forcesac][,fullflush][,nomerge]
   size		set size of IOMMU (in bytes)
   noagp	don't initialize the AGP driver and use the full aperture.
   off		don't use the IOMMU
   leak		turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
   memaper[=order] allocate its own aperture over RAM with size 32MB<<order.
   noforce	don't force IOMMU usage. Default.
   force	Force IOMMU.
   merge	Do SG merging. Implies force (experimental)
   nomerge	Don't do SG merging.
   forcesac	Force SAC mode for masks <40bits (experimental)
   fullflush	Flush IOMMU on each allocation (default)
   nofullflush	Don't use IOMMU fullflush
   allowed	overwrite iommu off workarounds for specific chipsets.
   soft		Use software bounce buffering (default for Intel machines)
*/
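/* Example (illustrative): booting with "iommu=force,fullflush,memaper=2"
   forces the IOMMU on, flushes the GART on every allocation and allocates
   its own 128MB (32MB<<2) aperture over RAM. */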
__init int iommu_setup(char *opt)
{
	int arg;
	char *p = opt;

	for (;;) {
		if (!memcmp(p,"noagp", 5))
			no_agp = 1;
		if (!memcmp(p,"off", 3))
			no_iommu = 1;
		if (!memcmp(p,"force", 5)) {
			force_iommu = 1;
			iommu_aperture_allowed = 1;
		}
		if (!memcmp(p,"allowed",7))
			iommu_aperture_allowed = 1;
		if (!memcmp(p,"noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}
		if (!memcmp(p, "memaper", 7)) {
			fallback_aper_force = 1;
			p += 7;
			if (*p == '=' && get_option(&p, &arg))
				fallback_aper_order = arg;
		}
		if (!memcmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!memcmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!memcmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!memcmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!memcmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!memcmp(p, "fullflush", 9))
			iommu_fullflush = 1;
		if (!memcmp(p, "nofullflush", 11))
			iommu_fullflush = 0;
		if (!memcmp(p, "soft", 4))
			swiotlb = 1;
#ifdef CONFIG_IOMMU_LEAK
		if (!memcmp(p,"leak", 4)) {
			leak_trace = 1;
			p += 4;
			if (isdigit(*p) && get_option(&p, &arg))
				iommu_leak_pages = arg;
		}
#endif
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_size = arg;
		do {
			if (*p == ' ' || *p == 0)
				return 0;
		} while (*p++ != ',');
	}
	return 1;
}