[linux-2.6.git] / mm / hugetlb.c  (patch-2_6_7-vs1_9_1_12)
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>

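/*
 * Pool accounting: nr_huge_pages is the total number of huge pages in the
 * pool, free_huge_pages counts those not currently handed out, and
 * max_huge_pages is the pool size requested at boot or via sysctl.  Free
 * pages sit on per-node lists protected by hugetlb_lock.  hugetlb_zero and
 * hugetlb_infinity are exported as min/max bounds for the sysctl entry.
 */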
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static spinlock_t hugetlb_lock = SPIN_LOCK_UNLOCKED;

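/* Put a free huge page on the free list of the node it belongs to. */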
static void enqueue_huge_page(struct page *page)
{
        list_add(&page->lru,
                 &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
}

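/*
 * Take a huge page off the free lists, preferring the local node and
 * falling back to the first node that has one.  Caller holds hugetlb_lock.
 */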
static struct page *dequeue_huge_page(void)
{
        int nid = numa_node_id();
        struct page *page = NULL;

        if (list_empty(&hugepage_freelists[nid])) {
                for (nid = 0; nid < MAX_NUMNODES; ++nid)
                        if (!list_empty(&hugepage_freelists[nid]))
                                break;
        }
        if (nid >= 0 && nid < MAX_NUMNODES &&
            !list_empty(&hugepage_freelists[nid])) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
        }
        return page;
}

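/*
 * Allocate a brand new huge page from the buddy allocator, rotating the
 * starting node round-robin so the pool is spread across nodes.
 */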
static struct page *alloc_fresh_huge_page(void)
{
        static int nid = 0;
        struct page *page;
        page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
                                        HUGETLB_PAGE_ORDER);
        nid = (nid + 1) % numnodes;
        return page;
}

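/*
 * Destructor for a huge page: alloc_huge_page() stores this function in
 * page[1].mapping, so the compound-page release path hands the page back
 * to the free pool here instead of the buddy allocator when the last
 * reference is dropped.
 */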
void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));

        INIT_LIST_HEAD(&page->lru);
        page[1].mapping = NULL;

        spin_lock(&hugetlb_lock);
        enqueue_huge_page(page);
        free_huge_pages++;
        spin_unlock(&hugetlb_lock);
}

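/*
 * Hand out a huge page from the pool: take it off the free lists, give it
 * an initial reference, install free_huge_page() as its destructor and
 * zero every constituent PAGE_SIZE page.  Returns NULL if the pool is
 * empty.
 */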
struct page *alloc_huge_page(void)
{
        struct page *page;
        int i;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page();
        if (!page) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        }
        free_huge_pages--;
        spin_unlock(&hugetlb_lock);
        set_page_count(page, 1);
        page[1].mapping = (void *)free_huge_page;
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
                clear_highpage(&page[i]);
        return page;
}

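/*
 * Boot-time initialization: set up the per-node free lists and try to
 * populate the pool with max_huge_pages pages, as requested via the
 * "hugepages=" command line option.  The pool size is clamped to what
 * could actually be allocated.
 */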
static int __init hugetlb_init(void)
{
        unsigned long i;
        struct page *page;

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        for (i = 0; i < max_huge_pages; ++i) {
                page = alloc_fresh_huge_page();
                if (!page)
                        break;
                spin_lock(&hugetlb_lock);
                enqueue_huge_page(page);
                spin_unlock(&hugetlb_lock);
        }
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
        return 0;
}
module_init(hugetlb_init);

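/*
 * Parse the "hugepages=N" kernel command line option; e.g. booting with
 * "hugepages=64" asks hugetlb_init() to preallocate 64 huge pages.
 */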
static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
                max_huge_pages = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);

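/*
 * Shrink the pool by one page: clear the flags and reference counts of
 * every constituent page and give the whole HUGETLB_PAGE_ORDER block back
 * to the buddy allocator.  Caller holds hugetlb_lock.
 */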
static void update_and_free_page(struct page *page)
{
        int i;
        nr_huge_pages--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
                set_page_count(&page[i], 0);
        }
        set_page_count(page, 1);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}

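/*
 * When shrinking the pool on a highmem machine, free huge pages that live
 * in lowmem first, since lowmem is the scarcer resource.  Decrements count
 * for each page freed and returns the remainder (0 once count pages have
 * been freed).
 */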
#ifdef CONFIG_HIGHMEM
static int try_to_free_low(unsigned long count)
{
        int i;
        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page;
                list_for_each_entry(page, &hugepage_freelists[i], lru) {
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        --free_huge_pages;
                        if (!--count)
                                return 0;
                }
        }
        return count;
}
#else
static inline int try_to_free_low(unsigned long count)
{
        return count;
}
#endif

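/*
 * Resize the pool to "count" pages: grow it with freshly allocated huge
 * pages, or shrink it by returning unused pages (lowmem first on highmem
 * configurations) to the buddy allocator.  Pages currently in use are
 * never taken away; the achieved pool size is returned.
 */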
static unsigned long set_max_huge_pages(unsigned long count)
{
        while (count > nr_huge_pages) {
                struct page *page = alloc_fresh_huge_page();
                if (!page)
                        return nr_huge_pages;
                spin_lock(&hugetlb_lock);
                enqueue_huge_page(page);
                free_huge_pages++;
                nr_huge_pages++;
                spin_unlock(&hugetlb_lock);
        }
        if (count >= nr_huge_pages)
                return nr_huge_pages;

        spin_lock(&hugetlb_lock);
        for (count = try_to_free_low(count); count < nr_huge_pages; --free_huge_pages) {
                struct page *page = dequeue_huge_page();
                if (!page)
                        break;
                update_and_free_page(page);
        }
        spin_unlock(&hugetlb_lock);
        return nr_huge_pages;
}

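/*
 * Handler for the vm.nr_hugepages sysctl (/proc/sys/vm/nr_hugepages):
 * parse the requested pool size and resize the pool, reporting back how
 * many pages could actually be set aside.
 */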
#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length)
{
        proc_doulongvec_minmax(table, write, file, buffer, length);
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
}
#endif /* CONFIG_SYSCTL */

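/*
 * Emit the HugePages_Total, HugePages_Free and Hugepagesize lines that
 * appear in /proc/meminfo.
 */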
int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
                        nr_huge_pages,
                        free_huge_pages,
                        HPAGE_SIZE/1024);
}

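/*
 * Check whether the free pool can cover "size" bytes, rounding the request
 * up to a whole number of huge pages.
 */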
int is_hugepage_mem_enough(size_t size)
{
        return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
EXPORT_SYMBOL(hugetlb_total_pages);

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
                                unsigned long address, int *unused)
{
        BUG();
        return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .nopage = hugetlb_nopage,
};

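/*
 * Tear down the huge page mappings in [start, start + length): a thin
 * wrapper that takes mm->page_table_lock around unmap_hugepage_range().
 */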
void zap_hugepage_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long length)
{
        struct mm_struct *mm = vma->vm_mm;

        spin_lock(&mm->page_table_lock);
        unmap_hugepage_range(vma, start, start + length);
        spin_unlock(&mm->page_table_lock);
}