static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
+static u32 gart_unmapped_entry;
+
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
static void flush_gart(struct pci_dev *dev)
{
unsigned long flags;
- int bus = dev ? dev->bus->number : -1;
- cpumask_const_t bus_cpumask = pcibus_to_cpumask(bus);
int flushed = 0;
int i;
u32 w;
if (!northbridges[i])
continue;
- if (bus >= 0 && !(cpu_isset_const(i, bus_cpumask)))
- continue;
pci_write_config_dword(northbridges[i], 0x9c,
northbridge_flush_word[i] | 1);
/* Make sure the hardware actually executed the flush. */
flushed++;
}
if (!flushed)
- printk("nothing to flush? %d\n", bus);
+ printk("nothing to flush?\n");
need_flush = 0;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
#define SET_LEAK(x) if (iommu_leak_tab) \
iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
- iommu_leak_tab[x] = 0;
+ iommu_leak_tab[x] = NULL;
/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
unsigned long pages = 0;
int need = 0, nextneed;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb)
+ return swiotlb_map_sg(&dev->dev,sg,nents,dir);
+#endif
+
BUG_ON(dir == PCI_DMA_NONE);
if (nents == 0)
return 0;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size);
for (i = 0; i < npages; i++) {
- iommu_gatt_base[iommu_page + i] = 0;
+ iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
CLEAR_LEAK(iommu_page + i);
}
free_iommu(iommu_page, npages);
unsigned long aper_size;
unsigned long iommu_start;
struct pci_dev *dev;
-
+ unsigned long scratch;
+ long i;
#ifndef CONFIG_AGP_AMD64
no_agp = 1;
return -1;
}
}
-
+
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
*/
clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
+ /*
+ * Try to workaround a bug (thanks to BenH)
+ * Set unmapped entries to a scratch page instead of 0.
+ * Any prefetches that hit unmapped entries won't get a bus abort
+ * then.
+ */
+ scratch = get_zeroed_page(GFP_KERNEL);
+ if (!scratch)
+ panic("Cannot allocate iommu scratch page");
+ gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
+ for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
+ iommu_gatt_base[i] = gart_unmapped_entry;
+
for_all_nb(dev) {
u32 flag;
int cpu = PCI_SLOT(dev->devfn) - 24;