/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
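
/*
 * Window arithmetic: 256MB of 4K pages is 64K iopte_t entries of
 * four bytes each, i.e. a 256KB table occupying 64 pages, order 6.
 */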

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
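
/*
 * iopte layout, as MKIOPTE() shows: the page frame number sits in
 * the IOPTE_PAGE field ((pfn)<<8), permission bits sit below it,
 * and IOPTE_WAZ covers bits that must be written as zero.
 */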

void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
	unsigned int impl, vers;
	unsigned long tmp;
	struct iommu_struct *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;
	unsigned long *bitmap;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}
	prom_getproperty(iommund, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs));
	memset(&r, 0, sizeof(r));
	r.flags = iommu_promregs[0].which_io;
	r.start = iommu_promregs[0].phys_addr;
	iommu->regs = (struct iommu_regs *)
		sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need a 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);

	printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	sbus->iommu = iommu;
}
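
/*
 * Note that the base register above takes the table's physical
 * address shifted right by four bits, and the hardware wants the
 * table aligned to its size, hence the order-6 allocation.
 */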

/* This begs to be btfixup-ed by srmmu. */
static void iommu_viking_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte & PAGE_MASK;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	if (viking_mxcc_present) {
		while(start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while(start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	}
}
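
/*
 * Map npages physically contiguous pages starting at "page" into
 * the IOMMU window and return the bus address of the mapping.
 */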
static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
	struct iommu_struct *iommu = sbus->iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	ioptex = bit_map_string_get(&iommu->usemap, npages, 1);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_viking_flush_iotlb(iopte0, npages);

	return busa0;
}
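
/*
 * Map a kernel virtual buffer for DMA, preserving the intra-page
 * offset in the returned bus address.
 */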
static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
    struct sbus_bus *sbus)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(page, npages, sbus);
	return busa + off;
}
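
/*
 * The wrappers below differ only in cache flushing strategy and are
 * selected in ld_mmu_iommu(): no flush for I/O coherent chips, one
 * global flush where flush_page_for_dma(0) flushes the whole cache,
 * or a per-page flush otherwise.  Drivers normally reach them
 * through sbus_map_single() and sbus_map_sg().
 */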
static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while(page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(vaddr, len, sbus);
}
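
/* Scatter-gather counterparts of the three variants above. */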
static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}

static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}

static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while(sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg->page)) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}
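
/*
 * Release paths: clear the ioptes, invalidate the hardware TLB entry
 * for each page, and hand the range back to the usemap bitmap.
 */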
static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
{
	struct iommu_struct *iommu = sbus->iommu;
	int ioptex;
	int i;

	if (busa < iommu->start)
		BUG();
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
}

static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	int n;

	while(sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
		sg->dvma_address = 0x21212121;
		sg++;
	}
}
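
/*
 * Consistent ("DVMA") mappings.  The CPU-side pte flags (dvma_prot)
 * and the IOMMU-side iopte flags (ioperm_noc) are precomputed per
 * CPU type in ld_mmu_iommu() below; the mapping is made cacheable
 * only on CPUs where that is known to be safe.  Drivers normally
 * get here via sbus_alloc_consistent().
 */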
#ifdef CONFIG_SBUS
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
	unsigned long page, end;
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	if ((va & ~PAGE_MASK) != 0) BUG();
	if ((addr & ~PAGE_MASK) != 0) BUG();
	if ((len & ~PAGE_MASK) != 0) BUG();

	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, 1);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 * are handled by a single interface.  Some cpus are
	 * completely not I/O DMA coherent, and some have
	 * virtually indexed caches.  The driver DMA flushing
	 * methods handle the former case, but here during
	 * IOMMU page table modifications, and usage of non-cacheable
	 * cpu mappings of pages potentially in the cpu caches, we have
	 * to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_viking_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
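
/* Undo iommu_map_dma_area(): clear the ioptes and release the range. */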
static void iommu_unmap_dma_area(unsigned long busa, int len)
{
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	if ((busa & ~PAGE_MASK) != 0) BUG();
	if ((len & ~PAGE_MASK) != 0) BUG();

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
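
/* Translate a consistent-mapping bus address back to its struct page. */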
static struct page *iommu_translate_dvma(unsigned long busa)
{
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;

	iopte += ((busa - iommu->start) >> PAGE_SHIFT);
	return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
}
#endif /* CONFIG_SBUS */
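
/*
 * No-op stubs: there is nothing to lock in this scheme, but the
 * mmu_lockarea/mmu_unlockarea hooks must exist.
 */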
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}