* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
prom_printf("Unable to allocate iommu structure\n");
prom_halt();
}
- prom_getproperty(iommund, "reg", (void *) iommu_promregs,
- sizeof(iommu_promregs));
- memset(&r, 0, sizeof(r));
- r.flags = iommu_promregs[0].which_io;
- r.start = iommu_promregs[0].phys_addr;
- iommu->regs = (struct iommu_regs *)
- sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
- if(!iommu->regs) {
+ iommu->regs = NULL;
+ if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
+ sizeof(iommu_promregs)) != -1) {
+ memset(&r, 0, sizeof(r));
+ r.flags = iommu_promregs[0].which_io;
+ r.start = iommu_promregs[0].phys_addr;
+ iommu->regs = (struct iommu_regs *)
+ sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
+ }
+ if (!iommu->regs) {
prom_printf("Cannot map IOMMU registers\n");
prom_halt();
}
prom_halt();
}
bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
+ /* To be coherent on HyperSparc, the page color of DVMA
+ * and physical addresses must match.
+ */
+ if (srmmu_modtype == HyperSparc)
+ iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
+ else
+ iommu->usemap.num_colors = 1;
printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
impl, vers, iommu->page_table,
}
/* This begs to be btfixup-ed by srmmu. */
-static void iommu_viking_flush_iotlb(iopte_t *iopte, unsigned int niopte)
+/* Flush the iotlb entries to ram. */
+/* This could be better if we didn't have to flush whole pages. */
+static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
unsigned long start;
unsigned long end;
- start = (unsigned long)iopte & PAGE_MASK;
+ start = (unsigned long)iopte;
end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
+ start &= PAGE_MASK;
if (viking_mxcc_present) {
while(start < end) {
viking_mxcc_flush_page(start);
viking_flush_page(start);
start += PAGE_SIZE;
}
+ } else {
+ while(start < end) {
+ __flush_page_to_ram(start);
+ start += PAGE_SIZE;
+ }
}
}
unsigned int busa, busa0;
int i;
- ioptex = bit_map_string_get(&iommu->usemap, npages, 1);
+ /* page color = pfn of page */
+ ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
if (ioptex < 0)
panic("iommu out");
busa0 = iommu->start + (ioptex << PAGE_SHIFT);
page++;
}
- iommu_viking_flush_iotlb(iopte0, npages);
+ iommu_flush_iotlb(iopte0, npages);
return busa0;
}
int ioptex;
int i;
- if (busa < iommu->start)
- BUG();
+ BUG_ON(busa < iommu->start);
ioptex = (busa - iommu->start) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
iopte_val(iommu->page_table[ioptex + i]) = 0;
iopte_t *first;
int ioptex;
- if ((va & ~PAGE_MASK) != 0) BUG();
- if ((addr & ~PAGE_MASK) != 0) BUG();
- if ((len & ~PAGE_MASK) != 0) BUG();
+ BUG_ON((va & ~PAGE_MASK) != 0);
+ BUG_ON((addr & ~PAGE_MASK) != 0);
+ BUG_ON((len & ~PAGE_MASK) != 0);
- ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, 1);
+ /* page color = physical pfn (addr >> PAGE_SHIFT), matching iommu_get_one */
+ ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
+ addr >> PAGE_SHIFT);
if (ioptex < 0)
panic("iommu out");
* to handle the latter case as well.
*/
flush_cache_all();
- iommu_viking_flush_iotlb(first, len >> PAGE_SHIFT);
+ iommu_flush_iotlb(first, len >> PAGE_SHIFT);
flush_tlb_all();
iommu_invalidate(iommu->regs);
unsigned long end;
int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
- if ((busa & ~PAGE_MASK) != 0) BUG();
- if ((len & ~PAGE_MASK) != 0) BUG();
+ BUG_ON((busa & ~PAGE_MASK) != 0);
+ BUG_ON((len & ~PAGE_MASK) != 0);
iopte += ioptex;
end = busa + len;