arch/sparc64/mm/generic.c
/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* Sanity check used when a pte is replaced: the old pte must be clear,
 * otherwise we would be silently overwriting an existing mapping.
 */
static inline void forget_pte(pte_t page)
{
        if (!pte_none(page)) {
                printk("forget_pte: old mapping existed!\n");
                BUG();
        }
}

/* Remap I/O memory, the same way as remap_page_range(), but using
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and does not check
 * the mem_map table, as this memory is independent of normal memory.
 *
 * As a special hack, if the lowest bit of offset is set, the
 * side-effect bit will be turned off.  This is used as a
 * performance improvement on FFB/AFB. -DaveM
 */
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        /* Work only within the span covered by this pte page. */
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage;
                pte_t entry;
                unsigned long curend = address + PAGE_SIZE;

                /* Bit 0 of offset is the "turn off side-effects" flag,
                 * not an address bit, so mask it out of the pte. */
                entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
                if (!(address & 0xffff)) {
                        /* The virtual address is at least 64K aligned: pick
                         * the largest hardware page size for which the
                         * physical offset is equally aligned and the
                         * remaining range is big enough. */
                        if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ4MB),
                                                  space);
                                curend = address + 0x400000;
                                offset += 0x400000;
                        } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ512K),
                                                  space);
                                curend = address + 0x80000;
                                offset += 0x80000;
                        } else if (!(offset & 0xfffe) && end >= address + 0x10000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ64K),
                                                  space);
                                curend = address + 0x10000;
                                offset += 0x10000;
                        } else
                                offset += PAGE_SIZE;
                } else
                        offset += PAGE_SIZE;

                if (offset & 0x1UL)
                        pte_val(entry) &= ~(_PAGE_E);   /* clear the side-effect bit */
                /* Replicate the (possibly large-page) pte into every
                 * PAGE_SIZE slot that it covers. */
                do {
                        oldpage = *pte;
                        pte_clear(pte);
                        set_pte(pte, entry);
                        forget_pte(oldpage);
                        address += PAGE_SIZE;
                        pte++;
                } while (address < curend);
        } while (address < end);
}
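
/* Worked example (added for clarity; not in the original file): with the
 * default 8K base pages on sparc64, remapping a 4MB-aligned virtual range
 * whose physical offset is also 4MB-aligned takes the _PAGE_SZ4MB branch
 * above.  One 4MB pte value is computed, and the inner loop replicates it
 * into all 512 (4MB / 8K) page-table slots covering the range, so the
 * hardware can use a single 4MB TLB entry while the page tables keep
 * their uniform base-page layout.
 */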

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        /* Bias offset so that (address + offset) always yields the
         * physical offset corresponding to 'address'. */
        offset -= address;
        do {
                /* Allocate against the mm that owns the vma, not
                 * blindly against current->mm. */
                pte_t * pte = pte_alloc_map(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
                pte_unmap(pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}
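
/* Example of the offset bias above (added for clarity; the numbers are
 * hypothetical): remapping virtual 0x10000 from physical 0x8000000 gives
 * offset = 0x8000000 - 0x10000 = 0x7ff0000; for the pte range starting at
 * virtual 0x18000, address + offset = 0x8008000, which is exactly 0x8000
 * bytes into the physical region, as required.
 */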

int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;

        /* The caller's prot is deliberately overridden: all obio
         * mappings use the fixed pg_iobits protections. */
        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);

        spin_lock(&mm->page_table_lock);
        while (from < end) {
                pmd_t *pmd = pmd_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        spin_unlock(&mm->page_table_lock);

        flush_tlb_range(vma, beg, end);
        return error;
}
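
/* Usage sketch (illustrative only; not part of the original file): a
 * driver's mmap handler might call io_remap_page_range() roughly as
 * below.  EXAMPLE_PHYS_BASE and EXAMPLE_IOSPACE are hypothetical
 * placeholders for a real device's physical base address and obio space
 * number; the prot argument is ignored by io_remap_page_range() anyway,
 * since it substitutes pg_iobits.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

        return io_remap_page_range(vma, vma->vm_start,
                                   EXAMPLE_PHYS_BASE + off, size,
                                   vma->vm_page_prot, EXAMPLE_IOSPACE);
}
#endif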