/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
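
/*
 * Set up the PTEs for [address, address + size) within a single PMD
 * entry: map the range, page by page, to the physically contiguous
 * area starting at phys_addr. The range is clamped to the current PMD.
 */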
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        unsigned long pfn;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                        _PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}
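
/*
 * Walk the PMD entries covering [address, address + size) within one
 * PGD entry, allocating PTE pages as needed and letting remap_area_pte()
 * fill each PMD-sized chunk. Returns -ENOMEM if a PTE page cannot be
 * allocated.
 */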
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}
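
/*
 * Top level of the walk: map size bytes of physical memory at phys_addr
 * to the virtual range starting at address, taking init_mm's
 * page_table_lock while the kernel page tables are modified.
 */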
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                                 unsigned long size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset_k(address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                         phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}

/*
 * Generic mapping function:
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                return phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory)) {
#ifndef CONFIG_DISCONTIGMEM
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
#endif
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
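
        /*
         * Worked example (editor's note; the numbers are hypothetical):
         * for phys_addr = 0xfebf0004 and size = 0x10, last_addr is
         * 0xfebf0013, so offset becomes 0x4, phys_addr is rounded down
         * to 0xfebf0000, and size is rounded up to one full 0x1000-byte
         * page. The caller gets the mapped base plus this offset back.
         */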

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                vunmap(addr);
                return NULL;
        }
        return (void *) (offset + (char *)addr);
}
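
/*
 * Editor's sketch, not part of the original file: how a caller might use
 * __ioremap() for a small region that is deliberately not page-aligned.
 * The bus address and the "signature" it reads are hypothetical; drivers
 * normally use the ioremap()/ioremap_nocache() wrappers instead.
 */
#if 0
static u32 example_read_signature(void)
{
        /* 4 bytes at an unaligned bus address; flags = 0 means cacheable. */
        void *sig = __ioremap(0xfffd0004UL, 4, 0);
        u32 val;

        if (!sig)
                return 0;
        val = readl(sig);       /* pointer already includes the 0x4 offset */
        iounmap(sig);
        return val;
}
#endif
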
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful when some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        void *p = __ioremap(phys_addr, size, _PAGE_PCD);
        if (!p)
                return p;

        if (phys_addr + size < virt_to_phys(high_memory)) {
                struct page *ppage = virt_to_page(__va(phys_addr));
                unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

                /* Sanity checks: the range must lie in low memory and not wrap. */
                BUG_ON(phys_addr + size > virt_to_phys(high_memory));
                BUG_ON(phys_addr + size < phys_addr);

                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                        iounmap(p);
                        p = NULL;
                }
                global_flush_tlb();
        }

        return p;
}
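
/*
 * Editor's sketch, not part of the original file: typical driver usage of
 * ioremap_nocache() and iounmap(). The bus address, size and register
 * offsets below are hypothetical.
 */
#if 0
static int example_reset_device(void)
{
        /* Map 4K of hypothetical MMIO registers, uncached. */
        void *regs = ioremap_nocache(0xfebf0000UL, 0x1000);

        if (!regs)
                return -ENOMEM;
        writel(1, regs + 0x04);         /* hypothetical reset register */
        readl(regs + 0x04);             /* read back to flush the posted write */
        iounmap(regs);
        return 0;
}
#endif

/*
 * iounmap() undoes a mapping set up by __ioremap() or ioremap_nocache():
 * it removes the vmalloc-area mapping and, for low memory, restores the
 * cacheable attributes of the kernel direct mapping.
 */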
void iounmap(void *addr)
{
        struct vm_struct *p;
        if (addr <= high_memory)
                return;
        p = remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
        if (!p) {
                printk("iounmap: bad address %p\n", addr);
                return;
        }

        if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
                change_page_attr(virt_to_page(__va(p->phys_addr)),
                                 p->size >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }
        kfree(p);
}