/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 *
 * As a special hack if the lowest bit of offset is set the
 * side-effect bit will be turned off.  This is used as a
 * performance improvement on FFB/AFB. -DaveM
 */
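
/* A minimal usage sketch (illustrative only; the mydev_* names are
 * hypothetical, not part of this file): a driver's mmap() handler hands
 * its bus-space offset straight through, e.g.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return io_remap_page_range(vma, vma->vm_start, mydev_phys_base,
 *					   size, vma->vm_page_prot, mydev_iospace);
 *	}
 *
 * Passing (mydev_phys_base | 0x1UL) instead would request that the
 * side-effect bit be cleared, as described above.
 */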
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
				      unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io((offset & ~(0x1UL)), prot, space);

		/* Use the largest TLB size hint (4MB, 512K, 64K) that the
		 * alignment of the virtual address, the IO offset and the
		 * remaining length allows.
		 */
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		/* Lowest bit of offset set: turn off the side-effect bit. */
		if (offset & 0x1UL)
			pte_val(entry) &= ~(_PAGE_E);
		do {
			BUG_ON(!pte_none(*pte));
			set_pte(pte, entry);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
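
/* Walk the pmd entries covering this chunk, allocating pte pages and
 * filling them via io_remap_pte_range().
 */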
static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
				     unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(current->mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
		pte_unmap(pte);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
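
/* Same thing one level up: allocate the pmd tables referenced by this pud
 * and hand each chunk to io_remap_pmd_range().
 */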
static inline int io_remap_pud_range(pud_t * pud, unsigned long address, unsigned long size,
				     unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	offset -= address;
	do {
		pmd_t *pmd = pmd_alloc(current->mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		io_remap_pmd_range(pmd, address, end - address, address + offset, prot, space);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
	return 0;
}
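
/* Top-level entry point: establish the IO mapping for [from, from + size)
 * in vma's address space, walking pgd -> pud -> pmd -> pte under the
 * page_table_lock and flushing caches/TLB around the update.
 */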
int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pud_t *pud = pud_alloc(current->mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(pud, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(vma, beg, end);
	spin_unlock(&mm->page_table_lock);

	return error;
}