patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / arch / s390 / mm / init.c
1 /*
2  *  arch/s390/mm/init.c
3  *
4  *  S390 version
5  *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6  *    Author(s): Hartmut Penner (hp@de.ibm.com)
7  *
8  *  Derived from "arch/i386/mm/init.c"
9  *    Copyright (C) 1995  Linus Torvalds
10  */
11
12 #include <linux/config.h>
13 #include <linux/signal.h>
14 #include <linux/sched.h>
15 #include <linux/kernel.h>
16 #include <linux/errno.h>
17 #include <linux/string.h>
18 #include <linux/types.h>
19 #include <linux/ptrace.h>
20 #include <linux/mman.h>
21 #include <linux/mm.h>
22 #include <linux/swap.h>
23 #include <linux/smp.h>
24 #include <linux/init.h>
25 #include <linux/pagemap.h>
26 #include <linux/bootmem.h>
27
28 #include <asm/processor.h>
29 #include <asm/system.h>
30 #include <asm/uaccess.h>
31 #include <asm/pgtable.h>
32 #include <asm/pgalloc.h>
33 #include <asm/dma.h>
34 #include <asm/lowcore.h>
35 #include <asm/tlb.h>
36 #include <asm/tlbflush.h>
37
38 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
39
40 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
41 char  empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
42
/*
 * diag10 - tell the hypervisor that a page of storage is unused
 * @addr: real address of the page to release
 *
 * Issues DIAGNOSE code 0x10 ("release pages") for the page at @addr so
 * that VM/CP can reclaim the backing storage.  Pages at or above
 * 0x7ff00000 are skipped — the diagnose is issued in 31-bit addressing
 * mode (see the sam31 below), so the address must stay below the 2 GB
 * boundary; NOTE(review): the exact 0x7ff00000 cutoff presumably also
 * avoids the topmost segment — confirm against the CP documentation.
 */
void diag10(unsigned long addr)
{
        if (addr >= 0x7ff00000)
                return;
#ifdef __s390x__
        /* DIAG 0x10 must be executed in 31-bit mode: switch to 31-bit
         * addressing, issue the diagnose, then switch back to 64-bit. */
        asm volatile ("sam31\n\t"
                      "diag %0,%0,0x10\n\t"
                      "sam64" : : "a" (addr) );
#else
        asm volatile ("diag %0,%0,0x10" : : "a" (addr) );
#endif
}
55
56 void show_mem(void)
57 {
58         int i, total = 0, reserved = 0;
59         int shared = 0, cached = 0;
60
61         printk("Mem-info:\n");
62         show_free_areas();
63         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
64         i = max_mapnr;
65         while (i-- > 0) {
66                 total++;
67                 if (PageReserved(mem_map+i))
68                         reserved++;
69                 else if (PageSwapCache(mem_map+i))
70                         cached++;
71                 else if (page_count(mem_map+i))
72                         shared += page_count(mem_map+i) - 1;
73         }
74         printk("%d pages of RAM\n",total);
75         printk("%d reserved pages\n",reserved);
76         printk("%d pages shared\n",shared);
77         printk("%d pages swap cached\n",cached);
78 }
79
80 /* References to section boundaries */
81
82 extern unsigned long _text;
83 extern unsigned long _etext;
84 extern unsigned long _edata;
85 extern unsigned long __bss_start;
86 extern unsigned long _end;
87
88 extern unsigned long __init_begin;
89 extern unsigned long __init_end;
90
91 /*
92  * paging_init() sets up the page tables
93  */
94
95 #ifndef CONFIG_ARCH_S390X
void __init paging_init(void)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        pte_t   pte;
        int     i;
        unsigned long tmp;
        unsigned long pfn = 0;
        /* Physical address of the kernel page table origin, with the
         * segment-table designation bits, loaded into CR1/CR7/CR13 below. */
        unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
        /* SSM operand: system mask with DAT enabled (bit 0x04000000). */
        static const int ssm_mask = 0x04000000L;

        /* unmap whole virtual address space */

        pg_dir = swapper_pg_dir;

        for (i=0;i<KERNEL_PGD_PTRS;i++)
                pmd_clear((pmd_t*)pg_dir++);

        /*
         * map whole physical memory to virtual memory (identity mapping)
         */

        pg_dir = swapper_pg_dir;

        while (pfn < max_low_pfn) {
                /*
                 * pg_table is physical at this point
                 */
                pg_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                /* One 4K bootmem page holds four 1K page tables; each
                 * pgd slot points at one quarter of it. */
                pg_dir->pgd0 =  (_PAGE_TABLE | __pa(pg_table));
                pg_dir->pgd1 =  (_PAGE_TABLE | (__pa(pg_table)+1024));
                pg_dir->pgd2 =  (_PAGE_TABLE | (__pa(pg_table)+2048));
                pg_dir->pgd3 =  (_PAGE_TABLE | (__pa(pg_table)+3072));
                pg_dir++;

                /* Fill all four page tables; ptes past max_low_pfn are
                 * written as cleared entries so the tables are fully
                 * initialized. */
                for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
                        pte = pfn_pte(pfn, PAGE_KERNEL);
                        if (pfn >= max_low_pfn)
                                pte_clear(&pte);
                        set_pte(pg_table, pte);
                        pfn++;
                }
        }

        S390_lowcore.kernel_asce = pgdir_k;

        /* enable virtual mapping in kernel mode: load the kernel ASCE
         * into control registers 1, 7 and 13 (primary, secondary and
         * home space), then turn on DAT via SSM. */
        __asm__ __volatile__("    LCTL  1,1,%0\n"
                             "    LCTL  7,7,%0\n"
                             "    LCTL  13,13,%0\n"
                             "    SSM   %1"
                             : : "m" (pgdir_k), "m" (ssm_mask));

        local_flush_tlb();

        {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0};

                /* All 31-bit memory is DMA-capable: one zone only. */
                zones_size[ZONE_DMA] = max_low_pfn;
                free_area_init(zones_size);
        }
        return;
}
160
161 #else /* CONFIG_ARCH_S390X */
void __init paging_init(void)
{
        pgd_t * pg_dir;
        pmd_t * pm_dir;
        pte_t * pt_dir;
        pte_t   pte;
        int     i,j,k;
        unsigned long pfn = 0;
        /* Physical address of the kernel region table origin plus the
         * region-table designation bits; loaded into CR1/CR7/CR13 below. */
        unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
          _KERN_REGION_TABLE;
        /* SSM operand: system mask with DAT enabled (bit 0x04000000). */
        static const int ssm_mask = 0x04000000L;

        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned long dma_pfn, high_pfn;

        /* Split memory into a DMA zone (below MAX_DMA_ADDRESS) and a
         * normal zone; if everything fits under the DMA limit, put it
         * all in ZONE_DMA. */
        dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
        high_pfn = max_low_pfn;

        if (dma_pfn > high_pfn)
                zones_size[ZONE_DMA] = high_pfn;
        else {
                zones_size[ZONE_DMA] = dma_pfn;
                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
        }

        /* Initialize mem_map[].  */
        free_area_init(zones_size);


        /*
         * map whole physical memory to virtual memory (identity mapping)
         * using the three-level pgd/pmd/pte hierarchy; entries beyond
         * max_low_pfn are left cleared at every level.
         */

        pg_dir = swapper_pg_dir;

        for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {

                if (pfn >= max_low_pfn) {
                        pgd_clear(pg_dir);
                        continue;
                }

                /* A pmd table occupies four pages on s390x. */
                pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4);
                pgd_populate(&init_mm, pg_dir, pm_dir);

                for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
                        if (pfn >= max_low_pfn) {
                                pmd_clear(pm_dir);
                                continue;
                        }

                        pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);

                        for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
                                pte = pfn_pte(pfn, PAGE_KERNEL);
                                if (pfn >= max_low_pfn) {
                                        pte_clear(&pte);
                                        continue;
                                }
                                set_pte(pt_dir, pte);
                                pfn++;
                        }
                }
        }

        S390_lowcore.kernel_asce = pgdir_k;

        /* enable virtual mapping in kernel mode: load the kernel ASCE
         * into control registers 1, 7 and 13 (primary, secondary and
         * home space), then turn on DAT via SSM. */
        __asm__ __volatile__("lctlg 1,1,%0\n\t"
                             "lctlg 7,7,%0\n\t"
                             "lctlg 13,13,%0\n\t"
                             "ssm   %1"
                             : :"m" (pgdir_k), "m" (ssm_mask));

        local_flush_tlb();

        return;
}
241 #endif /* CONFIG_ARCH_S390X */
242
243 void __init mem_init(void)
244 {
245         unsigned long codesize, reservedpages, datasize, initsize;
246
247         max_mapnr = num_physpages = max_low_pfn;
248         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
249
250         /* clear the zero-page */
251         memset(empty_zero_page, 0, PAGE_SIZE);
252
253         /* this will put all low memory onto the freelists */
254         totalram_pages += free_all_bootmem();
255
256         reservedpages = 0;
257
258         codesize =  (unsigned long) &_etext - (unsigned long) &_text;
259         datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
260         initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
261         printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
262                 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
263                 max_mapnr << (PAGE_SHIFT-10),
264                 codesize >> 10,
265                 reservedpages << (PAGE_SHIFT-10),
266                 datasize >>10,
267                 initsize >> 10);
268 }
269
270 void free_initmem(void)
271 {
272         unsigned long addr;
273
274         addr = (unsigned long)(&__init_begin);
275         for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
276                 ClearPageReserved(virt_to_page(addr));
277                 set_page_count(virt_to_page(addr), 1);
278                 free_page(addr);
279                 totalram_pages++;
280         }
281         printk ("Freeing unused kernel memory: %ldk freed\n",
282                 ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
283 }
284
285 #ifdef CONFIG_BLK_DEV_INITRD
286 void free_initrd_mem(unsigned long start, unsigned long end)
287 {
288         if (start < end)
289                 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
290         for (; start < end; start += PAGE_SIZE) {
291                 ClearPageReserved(virt_to_page(start));
292                 set_page_count(virt_to_page(start), 1);
293                 free_page(start);
294                 totalram_pages++;
295         }
296 }
297 #endif