/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#ifdef CONFIG_PREEMPT
#define preempt_atomic() in_atomic()
#else
#define preempt_atomic() 1
#endif
dma_addr_t bad_dma_address;

unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;		/* Remapping table */
int no_iommu;
static int no_agp;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow = 1;
int force_iommu = 1;
#else
int panic_on_overflow = 0;
int force_iommu = 0;
#endif

int iommu_merge = 0;
int iommu_sac_force = 0;
/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it set the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
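/*
 * Worked example of the PTE encoding above (illustrative, not from the
 * original source): bits 12..31 of the physical address stay in place
 * and bits 32..39 are folded into PTE bits 4..11. For a page at
 * physical address 0x123456000:
 *
 *	GPTE_ENCODE(0x123456000)
 *		= 0x23456000 | (0x1 << 4) | GPTE_VALID | GPTE_COHERENT
 *		= 0x23456013
 *	GPTE_DECODE(0x23456013)
 *		= 0x23456000 | ((0x23456013 & 0xff0) << 28)
 *		= 0x23456000 | 0x100000000 = 0x123456000
 */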
#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
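/*
 * Example (illustrative): a 0x2000 byte buffer starting at address
 * 0x1004 straddles three 4K pages, and indeed
 * to_pages(0x1004, 0x2000) = round_up(0x4 + 0x2000, 0x1000) >> 12 = 3.
 */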
#define for_all_nb(dev) \
	dev = NULL;	\
	while ((dev = pci_find_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
		if (dev->bus->number == 0 && \
		    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
#define MAX_NB 8

static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];

#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
static dma_addr_t pci_map_area(struct pci_dev *dev, unsigned long phys_mem,
			       size_t size, int dir);
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
	if (offset == -1) {
		/* Wrapped around: retry from the start and force a flush. */
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	if (size == 1) {
		/* A single bit can be cleared atomically without the lock. */
		clear_bit(offset, iommu_gart_bitmap);
		return;
	}
	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(struct pci_dev *dev)
{
	unsigned long flags;
	int bus = dev ? dev->bus->number : -1;
	cpumask_const_t bus_cpumask = pcibus_to_cpumask(bus);
	int flushed = 0;
	int i;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		for (i = 0; i < MAX_NB; i++) {
			u32 w;
			if (!northbridges[i])
				continue;
			if (bus >= 0 && !(cpu_isset_const(i, bus_cpumask)))
				continue;
			pci_write_config_dword(northbridges[i], 0x9c,
					       northbridge_flush_word[i] | 1);
			/* Make sure the hardware actually executed the flush. */
			do {
				pci_read_config_dword(northbridges[i], 0x9c, &w);
			} while (w & 1);
			flushed++;
		}
		if (!flushed)
			printk("nothing to flush? %d\n", bus);
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Allocate memory for a consistent mapping.
 */
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			   dma_addr_t *dma_handle)
{
	void *memory;
	int gfp = preempt_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	unsigned long dma_mask = 0;
	u64 bus;

	if (hwdev)
		dma_mask = hwdev->dev.coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = 0xffffffff;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_consistent. */
	if (hwdev)
		dma_mask &= hwdev->dma_mask;

 again:
	memory = (void *)__get_free_pages(gfp, get_order(size));
	if (memory == NULL)
		return NULL;
	else {
		int high, mmu;
		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		if (no_iommu || dma_mask < 0xffffffffUL) {
			if (high) {
				if (!(gfp & GFP_DMA)) {
					gfp |= GFP_DMA;
					goto again;
				}
				goto free;
			}
			mmu = 0;
		}
		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	*dma_handle = pci_map_area(hwdev, bus, size, PCI_DMA_BIDIRECTIONAL);
	if (*dma_handle == bad_dma_address)
		goto error;
	flush_gart(hwdev);
	return memory;

 error:
	if (panic_on_overflow)
		panic("pci_alloc_consistent: overflow %lu bytes\n", size);
 free:
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
/*
 * Unmap consistent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	pci_unmap_single(hwdev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
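/*
 * Typical driver-side use of the consistent API (an illustrative sketch,
 * not part of this file; "pdev" and "struct mydesc" are made-up names):
 *
 *	struct mydesc *d;
 *	dma_addr_t d_dma;
 *
 *	d = pci_alloc_consistent(pdev, sizeof(*d), &d_dma);
 *	if (!d)
 *		return -ENOMEM;
 *	// hand d_dma to the device, access d from the CPU
 *	pci_free_consistent(pdev, sizeof(*d), d, d_dma);
 */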
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = 0;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab) return;
	dump = 1;
	show_stack(NULL,NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i+=2) {
		printk("%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
static void iommu_full(struct pci_dev *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	  "PCI-DMA: Out of IOMMU space for %lu bytes at device %s[%s]\n",
	       size, dev ? pci_pretty_name(dev) : "", dev ? dev->slot_name : "?");

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory will be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory will be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int need_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
	u64 mask = dev ? dev->dma_mask : 0xffffffff;
	int high = addr + size >= mask;

	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		return 0;
	}
	return high || force_iommu;
}

static inline int nonforced_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
	u64 mask = dev ? dev->dma_mask : 0xffffffff;
	int high = addr + size >= mask;

	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		return 0;
	}
	return high;
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t pci_map_area(struct pci_dev *dev, unsigned long phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("pci_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	/* The in-page offset survives the PAGE_SIZE increments above. */
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
/* Map a single area into the IOMMU */
dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_map_single(&dev->dev,addr,size,dir);
#endif

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = pci_map_area(dev, phys_mem, size, dir);
	flush_gart(dev);
	return bus;
}
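/*
 * Typical streaming use (an illustrative sketch; "pdev", "buf" and "len"
 * are made-up names; failure is signalled via bad_dma_address here):
 *
 *	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (bus == bad_dma_address)
 *		goto map_error;
 *	// point the device at 'bus' and start the transfer
 *	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 */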
/* Fallback for pci_map_sg in case of overflow */
static int pci_map_sg_nonforce(struct pci_dev *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "pci_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = pci_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					pci_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart(dev);
	return nents;
}
/* Map multiple scatterlist entries contiguous into the first. */
static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long npages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		npages = to_pages(s->offset, s->length);
		while (npages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}
static inline int pci_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __pci_map_cont(sg, start, stopat, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
int pci_map_sg(struct pci_dev *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == PCI_DMA_NONE);
	if (nents == 0)
		return 0;

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_map_sg(&dev->dev,sg,nents,dir);
#endif

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (pci_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (pci_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart(dev);
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

 error:
	flush_gart(NULL);
	pci_unmap_sg(dev, sg, nents, dir);
	/* When it was forced try again unforced */
	if (force_iommu)
		return pci_map_sg_nonforce(dev, sg, nents, dir);
	if (panic_on_overflow)
		panic("pci_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}
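/*
 * Scatter-gather usage sketch (illustrative; "pdev", "sglist", "nfrags"
 * and program_hw_entry() are made-up names). Note that the returned
 * count, not the input count, bounds the device-visible entries,
 * because adjacent chunks may have been merged:
 *
 *	int i, n = pci_map_sg(pdev, sglist, nfrags, PCI_DMA_FROMDEVICE);
 *	for (i = 0; i < n; i++)
 *		program_hw_entry(i, sg_dma_address(&sglist[i]),
 *				 sg_dma_len(&sglist[i]));
 *	// ... after the DMA has completed ...
 *	pci_unmap_sg(pdev, sglist, nfrags, PCI_DMA_FROMDEVICE);
 */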
/*
 * Free a PCI mapping.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

#ifdef CONFIG_SWIOTLB
	if (swiotlb) {
		swiotlb_unmap_single(&hwdev->dev,dma_addr,size,direction);
		return;
	}
#endif

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = 0;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void pci_unmap_sg(struct pci_dev *dev, struct scatterlist *sg, int nents,
		  int dir)
{
	int i;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		pci_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}
int pci_dma_supported(struct pci_dev *dev, u64 mask)
{
	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_consistent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < 0x00ffffff)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.
	   This allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address mode
	   that is as efficient as 32bit (aic79xx). Don't force SAC for these.
	   Assume all masks <= 40 bits are of this type. Normally this doesn't
	   make any difference, but gives more gentle handling of IOMMU
	   overflow. */
	if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->slot_name,mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);
EXPORT_SYMBOL(pci_dma_supported);
EXPORT_SYMBOL(no_iommu);
EXPORT_SYMBOL(force_iommu);
EXPORT_SYMBOL(bad_dma_address);
EXPORT_SYMBOL(iommu_merge);
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
  "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20);

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
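/*
 * Worked example (illustrative): if the base register (0x94) reads 0x40
 * and the decoded order is 3, the aperture is (32MB << 3) = 256MB at
 * 0x40 << 25 = 2GB, which passes the below-4GB sanity check above.
 */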
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;

	aper_size = aper_base = info->aper_size = 0;
	for_all_nb(dev) {
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;
		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for_all_nb(dev) {
		u32 ctl;
		u32 gatt_reg;

		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;		/* GART enable */
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart(NULL);

	printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
	return -1;
}
extern int agp_amd64_init(void);

static int __init pci_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	struct pci_dev *dev;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(&info) < 0);
#endif

	if (swiotlb) {
		no_iommu = 1;
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		return -1;
	}

	if (no_iommu || (!force_iommu && end_pfn < 0xffffffff>>PAGE_SHIFT) ||
	    !iommu_aperture) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		no_iommu = 1;
		return -1;
	}

	if (no_agp) {
		int err = -1;
		printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
		if (force_iommu || end_pfn >= 0xffffffff>>PAGE_SHIFT)
			err = init_k8_gatt(&info);
		if (err < 0) {
			printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
			no_iommu = 1;
			return -1;
		}
	}

	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
						    get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	for_all_nb(dev) {
		u32 flag;
		int cpu = PCI_SLOT(dev->devfn) - 24;
		if (cpu >= MAX_NB)
			continue;
		northbridges[cpu] = dev;
		pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
		northbridge_flush_word[cpu] = flag;
	}

	flush_gart(NULL);

	return 0;
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
	 [,forcesac][,fullflush][,nomerge]
   size		set size of IOMMU (in bytes)
   noagp	don't initialize the AGP driver and use full aperture.
   off		don't use the IOMMU
   leak		turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
   memaper[=order] allocate its own aperture over RAM with size 32MB << order.
   noforce	don't force IOMMU usage. Default.
   force	Force IOMMU.
   merge	Do SG merging. Implies force (experimental)
   nomerge	Don't do SG merging.
   forcesac	Force SAC mode for masks <40bits (experimental)
   fullflush	Flush IOMMU on each allocation (default)
   nofullflush	Don't use IOMMU fullflush
   panic	Always panic on IOMMU overflow
   nopanic	Don't panic on IOMMU overflow
   allowed	override iommu-off workarounds for specific chipsets.
   soft		Use software bounce buffering (default for Intel machines)
*/
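/* Example (illustrative): booting with "iommu=force,fullflush,memaper=2"
   forces IOMMU usage, flushes the GART on every mapping, and falls back
   to an own 32MB << 2 = 128MB aperture over RAM if needed. */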
__init int iommu_setup(char *opt)
{
	int arg;
	char *p = opt;

	for (;;) {
		if (!memcmp(p,"noagp", 5))
			no_agp = 1;
		if (!memcmp(p,"off", 3))
			no_iommu = 1;
		if (!memcmp(p,"force", 5)) {
			force_iommu = 1;
			iommu_aperture_allowed = 1;
		}
		if (!memcmp(p,"allowed",7))
			iommu_aperture_allowed = 1;
		if (!memcmp(p,"noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}
		if (!memcmp(p, "memaper", 7)) {
			fallback_aper_force = 1;
			p += 7;
			if (*p == '=' && get_option(&p, &arg))
				fallback_aper_order = arg;
		}
		if (!memcmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!memcmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!memcmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!memcmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!memcmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!memcmp(p, "fullflush", 9))
			iommu_fullflush = 1;
		if (!memcmp(p, "nofullflush", 11))
			iommu_fullflush = 0;
		if (!memcmp(p, "soft", 4))
			swiotlb = 1;
#ifdef CONFIG_IOMMU_LEAK
		if (!memcmp(p,"leak", 4)) {
			leak_trace = 1;
			p += 4;
			if (isdigit(*p) && get_option(&p, &arg))
				iommu_leak_pages = arg;
		} else
#endif
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_size = arg;
		do {
			if (*p == ' ' || *p == 0)
				return 0;
		} while (*p++ != ',');
	}
	return 1;
}