/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems to
 * trigger bugs with some popular PCI cards, in particular 3ware (but it
 * has also been seen with QLogic at least).
 */
int iommu_fullflush = 1;

#define MAX_NB 8
/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
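/*
 * Worked example (added for illustration, not in the original source):
 * a GART PTE keeps physical address bits [31:12] in PTE bits [31:12] and
 * bits [39:32] in PTE bits [11:4]. For the physical address 0x123456000:
 *
 *      GPTE_ENCODE(0x123456000) == 0x23456013
 *              (0x23456000 | (0x1 << 4) | GPTE_VALID | GPTE_COHERENT)
 *      GPTE_DECODE(0x23456013)  == 0x123456000
 */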
#define to_pages(addr,size) \
        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
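/*
 * Example (added for illustration): to_pages() counts the GART pages a
 * buffer spans, including partial first/last pages. With 4K pages, a 32 byte
 * buffer at offset 0xff0 into a page crosses a page boundary, so
 * to_pages(0x1ff0, 0x20) == 2, while to_pages(0, 0x20) == 1.
 */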
#define for_all_nb(dev) \
        dev = NULL;     \
        while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
                if (dev->bus->number == 0 &&                            \
                    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
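/*
 * Usage sketch (added for illustration): 0x1103 is the K8 northbridge
 * miscellaneous control function (function 3), one per node, in slots 24-31
 * on bus 0. Callers iterate over all of them like this:
 *
 *      struct pci_dev *dev;
 *      for_all_nb(dev) {
 *              u32 word;
 *              pci_read_config_dword(dev, 0x9c, &word);
 *              ...
 *      }
 */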
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(int size)
{
        unsigned long offset, flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
        if (offset == -1) {
                need_flush = 1;
                offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
        }
        if (offset != -1) {
                set_bit_string(iommu_gart_bitmap, offset, size);
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = 1;
                }
        }
        if (iommu_fullflush)
                need_flush = 1;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
        return offset;
}
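/*
 * Note (added): alloc_iommu() is a next-fit allocator. The search starts at
 * next_bit, where the previous allocation ended; only when that fails does
 * it wrap and search from the start, which may hand out recently freed (and
 * therefore possibly still TLB-cached) entries, so need_flush is set and the
 * GART is flushed before the new mapping is used. With iommu_fullflush every
 * allocation forces a flush.
 */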
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        __clear_bit_string(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(struct device *dev)
{
        unsigned long flags;
        int flushed = 0;
        int i, max = 0;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                for (i = 0; i < MAX_NB; i++) {
                        if (!northbridges[i])
                                continue;
                        pci_write_config_dword(northbridges[i], 0x9c,
                                               northbridge_flush_word[i] | 1);
                        flushed++;
                        max = i;
                }
                for (i = 0; i <= max; i++) {
                        u32 w;
                        if (!northbridges[i])
                                continue;
                        /* Make sure the hardware actually executed the flush. */
                        do {
                                pci_read_config_dword(northbridges[i], 0x9c, &w);
                        } while (w & 1);
                }
                if (!flushed)
                        printk("nothing to flush?\n");
                need_flush = 0;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
                        iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
                        iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab) return;
        dump = 1;
        show_stack(NULL,NULL);
        /* Very crude. dump some from the end of the table too */
        printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i+=2) {
                printk("%lu: ", iommu_pages-i);
                printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
                printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non mapped prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        printk(KERN_ERR
               "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
               size, dev->bus_id);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
        }

#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}
static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size >= mask;
        int mmu = high;

        if (force_iommu)
                mmu = 1;
        return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size >= mask;

        return high;
}
/*
 * Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir)
{
        unsigned long npages = to_pages(phys_mem, size);
        unsigned long iommu_page = alloc_iommu(npages);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
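/*
 * Note (added): the return value above is correct even though phys_mem has
 * been advanced by the loop. Adding PAGE_SIZE per iteration never changes
 * the low 12 bits, so (phys_mem & ~PAGE_MASK) is still the offset of the
 * original buffer within its first page.
 */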
static dma_addr_t gart_map_simple(struct device *dev, char *buf,
                                 size_t size, int dir)
{
        dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

        flush_gart(dev);
        return map;
}
/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
        unsigned long phys_mem, bus;

        BUG_ON(dir == DMA_NONE);

        phys_mem = virt_to_phys(addr);
        if (!need_iommu(dev, phys_mem, size))
                return phys_mem;

        bus = gart_map_simple(dev, addr, size, dir);
        return bus;
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        int i;

        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                if (!s->dma_length || !s->length)
                        break;
                dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                unsigned long addr = page_to_phys(s->page) + s->offset;
                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart(dev);
        return nents;
}
/* Map multiple scatterlist entries continuous into the first. */
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
                          struct scatterlist *sout, unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(pages);
        unsigned long iommu_page = iommu_start;
        int i;

        if (iommu_start == -1)
                return -1;

        for (i = start; i < stopat; i++) {
                struct scatterlist *s = &sg[i];
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(i > start && s->offset);
                if (i == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = to_pages(s->offset, s->length);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);
        return 0;
}
static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
                               struct scatterlist *sout,
                               unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(stopat - start != 1);
                *sout = sg[start];
                sout->dma_length = sg[start].length;
                return 0;
        }
        return __dma_map_cont(sg, start, stopat, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        int i;
        int out;
        int start;
        unsigned long pages = 0;
        int need = 0, nextneed;

        BUG_ON(dir == DMA_NONE);
        if (nents == 0)
                return 0;

        out = 0;
        start = 0;
        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                dma_addr_t addr = page_to_phys(s->page) + s->offset;
                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        struct scatterlist *ps = &sg[i-1];
                        /* Can only merge when the last chunk ends on a page
                           boundary and the new one doesn't have an offset. */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(sg, start, i, sg+out, pages,
                                                 need) < 0)
                                        goto error;
                                out++;
                                pages = 0;
                                start = i;
                        }
                }

                need = nextneed;
                pages += to_pages(s->offset, s->length);
        }
        if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
                goto error;
        out++;
        flush_gart(dev);
        if (out < nents)
                sg[out].dma_length = 0;
        return out;

error:
        flush_gart(NULL);
        gart_unmap_sg(dev, sg, nents, dir);
        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);
        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for (i = 0; i < nents; i++)
                sg[i].dma_address = bad_dma_address;
        return 0;
}
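/*
 * Merging example (added for illustration): with iommu_merge enabled, three
 * entries { offset 0, len 4096 }, { offset 0, len 8192 } and { offset 512,
 * len 1024 } that all need the IOMMU produce two output mappings. The first
 * two merge because the first ends on a page boundary and the second starts
 * at offset 0; the third starts a new mapping because of its non-zero
 * offset.
 */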
/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                      size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;
        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = to_pages(dma_addr, size);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024)
                printk(KERN_WARNING
  "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
                       iommu_size >> 20);

        return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32;
        u64 aper_base;
        unsigned aper_order;

        pci_read_config_dword(dev, 0x94, &aper_base_32);
        pci_read_config_dword(dev, 0x90, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size >= 0xffffffff || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
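/*
 * Decoding example (added for illustration): register 0x90 bits [3:1] hold
 * the aperture order, so an order of 2 means 32MB << 2 = 128MB. Register
 * 0x94 bits [14:0] hold aperture base address bits [39:25]; a field value
 * of 0x70 therefore decodes to a base of 0x70 << 25 = 0xe0000000.
 */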
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        struct pci_dev *dev;
        void *gatt;
        unsigned aper_base, new_aper_base;
        unsigned aper_size, gatt_size, new_aper_size;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        for_all_nb(dev) {
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size>>20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;

        for_all_nb(dev) {
                u32 ctl;
                u32 gatt_reg;

                gatt_reg = __pa(gatt) >> 12;
                gatt_reg <<= 4;
                pci_write_config_dword(dev, 0x98, gatt_reg);
                pci_read_config_dword(dev, 0x90, &ctl);

                ctl |= 1;
                ctl &= ~((1<<4) | (1<<5));

                pci_write_config_dword(dev, 0x90, ctl);
        }
        flush_gart(NULL);

        printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
        return -1;
}
extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
        .mapping_error = NULL,
        .map_single = gart_map_single,
        .map_simple = gart_map_simple,
        .unmap_single = gart_unmap_single,
        .sync_single_for_cpu = NULL,
        .sync_single_for_device = NULL,
        .sync_single_range_for_cpu = NULL,
        .sync_single_range_for_device = NULL,
        .sync_sg_for_cpu = NULL,
        .sync_sg_for_device = NULL,
        .map_sg = gart_map_sg,
        .unmap_sg = gart_unmap_sg,
};
static int __init pci_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long aper_size;
        unsigned long iommu_start;
        struct pci_dev *dev;
        unsigned long scratch;
        long i;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (no_iommu ||
            (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
            !iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
                if (end_pfn > MAX_DMA32_PFN) {
                        printk(KERN_ERR "WARNING more than 4GB of memory "
                               "but IOMMU not compiled in.\n"
                               KERN_ERR "WARNING 32bit PCI may malfunction.\n"
                               KERN_ERR "You might want to enable "
                               "CONFIG_GART_IOMMU\n");
                }
                return -1;
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
                                                    get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
        memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                                  get_order(iommu_pages*sizeof(void *)));
                if (iommu_leak_tab)
                        memset(iommu_leak_tab, 0, iommu_pages * 8);
                else
                        printk("PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

        /*
         * Try to workaround a bug (thanks to BenH)
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then.
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        for_all_nb(dev) {
                u32 flag;
                int cpu = PCI_SLOT(dev->devfn) - 24;
                if (cpu >= MAX_NB)
                        continue;
                northbridges[cpu] = dev;
                pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
                northbridge_flush_word[cpu] = flag;
        }

        flush_gart(NULL);

        dma_ops = &gart_dma_ops;

        return 0;
}
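/*
 * Resulting aperture layout (added for illustration):
 *
 *  info.aper_base                                 info.aper_base + aper_size
 *  |<----------------------- AGP aperture ----------------------------->|
 *  |     left to AGP / unused     |<------ IOMMU area (iommu_size) ---->|
 *                                 ^ iommu_bus_base (= bad_dma_address)
 *                                 |EMERGENCY_PAGES|  remapping pages ...|
 *
 * The kernel direct mapping of the IOMMU area is removed above, so that
 * range is only ever accessed through PCI bus addresses.
 */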
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
void gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p,"leak",4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=') ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush",8))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush",11))
                iommu_fullflush = 0;
        if (!strncmp(p,"noagp",5))
                no_agp = 1;
        if (!strncmp(p, "noaperture",10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p,"force",5))
                iommu_aperture_allowed = 1;
        if (!strncmp(p,"allowed",7))
                iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}
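/*
 * Usage sketch (added; assumes this is wired up to the "iommu=" boot option
 * handler in pci-dma.c, as the "duplicated from pci-dma.c" note suggests):
 *
 *      iommu=fullflush         flush the GART on every mapping
 *      iommu=noagp             force the private GATT setup instead of AGP
 *      iommu=memaper=2         force a fallback aperture of order 2
 *      iommu=leak              trace unfreed mappings (CONFIG_IOMMU_LEAK)
 */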