5 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Collaborative memory management interface.
11 #include <linux/config.h>
12 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/sched.h>
17 #include <linux/sysctl.h>
18 #include <linux/ctype.h>
20 #include <asm/pgalloc.h>
21 #include <asm/uaccess.h>
24 #include "../../../drivers/s390/net/smsgiucv.h"
/*
 * CMM_NR_PAGES: how many page addresses fit into one page used as a
 * cmm_page_array element: a page full of unsigned longs minus two slots
 * (the 'next' pointer and an index field -- the index member itself is
 * in lines elided from this excerpt; it is used as 'pa->index' below).
 */
26 #define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
/*
 * Singly linked chain of pages, each recording the addresses of pages
 * that have been taken away from the Linux page pool.
 * NOTE(review): interior lines of this struct are missing from this
 * excerpt (at least the 'index' member and the closing brace).
 */
28 struct cmm_page_array {
29 struct cmm_page_array *next;
31 unsigned long pages[CMM_NR_PAGES];
/* Pages currently held on the permanent (non-timed) list. */
34 static long cmm_pages = 0;
/* Pages currently held on the timed list (released again by timer). */
35 static long cmm_timed_pages = 0;
/*
 * Targets the background thread works toward.  'volatile' because they
 * are written from timer/IRQ-ish context and polled by cmm_thread;
 * NOTE(review): volatile is not a synchronization primitive -- this
 * relies on long stores being atomic on s390.
 */
36 static volatile long cmm_pages_target = 0;
37 static volatile long cmm_timed_pages_target = 0;
/* Per-tick decrement and interval for the timed-page release timer. */
38 static long cmm_timeout_pages = 0;
39 static long cmm_timeout_seconds = 0;
/* Heads of the two page-address chains (see struct cmm_page_array). */
41 static struct cmm_page_array *cmm_page_list = 0;
42 static struct cmm_page_array *cmm_timed_page_list = 0;
/* Bit 0 set while cmm_thread is alive; guards against double start. */
44 static unsigned long cmm_thread_active = 0;
/* Work item used to (re)start cmm_thread from cmm_kick_thread(). */
45 static struct work_struct cmm_thread_starter;
/* cmm_thread sleeps here until a target differs from the current count. */
46 static wait_queue_head_t cmm_thread_wait;
/* Timer that gradually returns timed pages (see cmm_timer_fn). */
47 static struct timer_list cmm_timer;

/* Forward declarations for the timer helpers defined further down. */
49 static void cmm_timer_fn(unsigned long);
50 static void cmm_set_timer(void);
/*
 * Parse an unsigned number from 'cp', advancing *endp past it.
 * Defaults to base 10; switches to base 16 on an "x"/"X" prefix that is
 * followed by a hex digit.  NOTE(review): lines are elided from this
 * excerpt (the return-type line and, presumably, leading-'0' octal
 * handling before the hex check) -- confirm against the full source.
 */
53 cmm_strtoul(const char *cp, char **endp)
55 unsigned int base = 10;
/* Hex prefix detected: hand simple_strtoul a base of 16. */
60 if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) {
65 return simple_strtoul(cp, endp, base);
/*
 * Take 'pages' pages out of the Linux page pool and record their
 * addresses in the cmm_page_array chain at *list; *counter tracks the
 * number of pages currently held.  NOTE(review): the allocation loop,
 * error handling and locking are in lines elided from this excerpt.
 */
69 cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
71 struct cmm_page_array *pa;
/* GFP_NOIO: may run under memory pressure; must not trigger I/O. */
76 page = __get_free_page(GFP_NOIO);
/* Current array element full (or none yet): allocate a fresh one. */
79 if (!pa || pa->index >= CMM_NR_PAGES) {
80 /* Need a new page for the page list. */
81 pa = (struct cmm_page_array *)
82 __get_free_page(GFP_NOIO);
/* Record the removed page's address in the current element. */
92 pa->pages[pa->index++] = page;
/*
 * Return up to 'pages' pages from the chain at *list back to the Linux
 * page pool, decrementing *counter accordingly.  Stops early when the
 * list runs dry.  NOTE(review): the loop, the free_page(page) call and
 * locking are in lines elided from this excerpt.
 */
100 cmm_free_pages(long pages, long *counter, struct cmm_page_array **list)
102 struct cmm_page_array *pa;
/* Nothing (more) recorded: the list is exhausted. */
107 if (!pa || pa->index <= 0)
/* Pop the most recently recorded page address. */
109 page = pa->pages[--pa->index];
/* Element now empty: free the list page itself and unlink it. */
110 if (pa->index == 0) {
112 free_page((unsigned long) *list);
/*
 * Background kernel thread: sleeps until either page count diverges
 * from its target, then allocates/frees one page at a time toward the
 * targets.  NOTE(review): loop structure, 'rc' declaration and several
 * closing braces are in lines elided from this excerpt.
 */
122 cmm_thread(void *dummy)
126 daemonize("cmmthread");
/* Pin to cpu 0 -- matches the smp_get_cpu(cpumask_of_cpu(0))
   reservation done at module init. */
127 set_cpus_allowed(current, cpumask_of_cpu(0));
/* Block until there is work: a current count differs from its target. */
129 rc = wait_event_interruptible(cmm_thread_wait,
130 (cmm_pages != cmm_pages_target ||
131 cmm_timed_pages != cmm_timed_pages_target));
132 if (rc == -ERESTARTSYS) {
133 /* Got kill signal. End thread. */
134 clear_bit(0, &cmm_thread_active);
/* Snap targets to current counts so nothing waits on us. */
135 cmm_pages_target = cmm_pages;
136 cmm_timed_pages_target = cmm_timed_pages;
/* Work one page toward the static target; on allocation failure give
   up and lower the target to what we actually have. */
139 if (cmm_pages_target > cmm_pages) {
140 if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
141 cmm_pages_target = cmm_pages;
142 } else if (cmm_pages_target < cmm_pages) {
143 cmm_free_pages(1, &cmm_pages, &cmm_page_list);
/* Same, for the timed list. */
145 if (cmm_timed_pages_target > cmm_timed_pages) {
146 if (cmm_alloc_pages(1, &cmm_timed_pages,
147 &cmm_timed_page_list))
148 cmm_timed_pages_target = cmm_timed_pages;
149 } else if (cmm_timed_pages_target < cmm_timed_pages) {
150 cmm_free_pages(1, &cmm_timed_pages,
151 &cmm_timed_page_list);
/* Timed pages held but no release timer armed: (re)arm it.
   NOTE(review): the cmm_set_timer() call itself is elided. */
153 if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
/*
 * Spawn cmm_thread.  Run via cmm_thread_starter (schedule_work) so the
 * thread is created from process context.
 */
160 cmm_start_thread(void)
162 kernel_thread(cmm_thread, 0, 0);
/*
 * Notify the worker of new targets: start the thread if it is not
 * already running (bit 0 of cmm_thread_active is the "alive" flag,
 * cleared by the thread itself on exit), then wake it.
 */
166 cmm_kick_thread(void)
168 if (!test_and_set_bit(0, &cmm_thread_active))
169 schedule_work(&cmm_thread_starter);
170 wake_up(&cmm_thread_wait);
/* NOTE(review): this is the body of the timer-arming helper (its
 * definition line is elided from this excerpt; per the forward
 * declaration above it is presumably cmm_set_timer). */
/* Nothing to time out, or no interval configured: cancel any timer. */
176 if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
177 if (timer_pending(&cmm_timer))
178 del_timer(&cmm_timer);
/* Timer already armed: just push its expiry out by one interval. */
181 if (timer_pending(&cmm_timer)) {
182 if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
/* Otherwise arm a fresh one-shot timer for one interval from now. */
185 cmm_timer.function = cmm_timer_fn;
187 cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
188 add_timer(&cmm_timer);
/*
 * Timer callback: lower the timed-page target by cmm_timeout_pages per
 * interval, clamping at zero, so timed pages drain back to Linux over
 * time.  NOTE(review): the 'pages' declaration, the clamp condition and
 * the thread kick / timer re-arm are in lines elided from this excerpt.
 */
192 cmm_timer_fn(unsigned long ignored)
196 pages = cmm_timed_pages_target - cmm_timeout_pages;
/* Clamp: never drive the target negative. */
198 cmm_timed_pages_target = 0;
200 cmm_timed_pages_target = pages;
/*
 * Set the absolute target for permanently held pages; the worker
 * thread converges cmm_pages toward it.  NOTE(review): the
 * cmm_kick_thread() call is presumably in an elided line.
 */
206 cmm_set_pages(long pages)
208 cmm_pages_target = pages;
/*
 * Add 'pages' to the timed-page target (relative, unlike
 * cmm_set_pages); these pages are later drained by cmm_timer_fn.
 */
219 cmm_add_timed_pages(long pages)
221 cmm_timed_pages_target += pages;
/* Read accessor: number of timed pages currently held. */
226 cmm_get_timed_pages(void)
228 return cmm_timed_pages;
/*
 * Configure the timed-release rate: 'pages' are returned every
 * 'seconds' seconds.  NOTE(review): the cmm_set_timer() call is
 * presumably in an elided line.
 */
232 cmm_set_timeout(long pages, long seconds)
234 cmm_timeout_pages = pages;
235 cmm_timeout_seconds = seconds;
/*
 * Advance past spaces and tabs.  Callers below test its return value,
 * so it presumably reports whether any blanks were skipped -- the
 * return statement is in lines elided from this excerpt; confirm.
 */
240 cmm_skip_blanks(char *cp, char **endp)
244 for (str = cp; *str == ' ' || *str == '\t'; str++);
249 #ifdef CONFIG_CMM_PROC
250 /* These will someday get removed. */
/* Ad-hoc sysctl binary IDs for /proc/sys/vm/cmm_* entries. */
251 #define VM_CMM_PAGES 1111
252 #define VM_CMM_TIMED_PAGES 1112
253 #define VM_CMM_TIMEOUT 1113

/* Forward declaration: the handlers below compare 'ctl' against
 * &cmm_table[0] to tell which entry they were called for. */
255 static struct ctl_table cmm_table[];
/*
 * Shared sysctl handler for cmm_pages and cmm_timed_pages: on write,
 * parse a page count and set/add the target; on read, format the
 * current count.  Which entry invoked us is decided by pointer
 * comparison against cmm_table[0].  NOTE(review): buffer declarations,
 * length bookkeeping and the write/read branch structure are in lines
 * elided from this excerpt.
 */
258 cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
259 void *buffer, size_t *lenp, loff_t *ppos)
/* Empty request, or a read at non-zero offset: nothing to do. */
265 if (!*lenp || (*ppos && !write)) {
/* Copy at most sizeof(buf)-worth of the user's input. */
272 if (copy_from_user(buf, buffer,
273 len > sizeof(buf) ? sizeof(buf) : len))
/* Force NUL termination before parsing. */
275 buf[sizeof(buf) - 1] = '\0';
276 cmm_skip_blanks(buf, &p);
277 pages = cmm_strtoul(p, &p);
/* cmm_pages entry sets an absolute target; the timed entry adds. */
278 if (ctl == &cmm_table[0])
279 cmm_set_pages(pages);
281 cmm_add_timed_pages(pages);
/* Read path: report the matching current count. */
283 if (ctl == &cmm_table[0])
284 pages = cmm_get_pages();
286 pages = cmm_get_timed_pages();
287 len = sprintf(buf, "%ld\n", pages);
290 if (copy_to_user(buffer, buf, len))
/*
 * sysctl handler for cmm_timeout: on write, parse "<pages> <seconds>"
 * and configure the timed-release rate; on read, format the current
 * pair.  NOTE(review): buffer declarations and branch structure are in
 * lines elided from this excerpt (mirrors cmm_pages_handler).
 */
299 cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
300 void *buffer, size_t *lenp, loff_t *ppos)
/* Empty request, or a read at non-zero offset: nothing to do. */
306 if (!*lenp || (*ppos && !write)) {
313 if (copy_from_user(buf, buffer,
314 len > sizeof(buf) ? sizeof(buf) : len))
/* Force NUL termination before parsing. */
316 buf[sizeof(buf) - 1] = '\0';
/* Expect two whitespace-separated numbers: pages then seconds. */
317 cmm_skip_blanks(buf, &p);
318 pages = cmm_strtoul(p, &p);
319 cmm_skip_blanks(p, &p);
320 seconds = cmm_strtoul(p, &p);
321 cmm_set_timeout(pages, seconds);
/* Read path: report the configured rate. */
323 len = sprintf(buf, "%ld %ld\n",
324 cmm_timeout_pages, cmm_timeout_seconds);
327 if (copy_to_user(buffer, buf, len))
/*
 * Sysctl entries under vm/: cmm_pages and cmm_timed_pages share one
 * handler (distinguished by entry address), cmm_timeout has its own.
 * NOTE(review): the per-entry braces, .mode fields and terminator are
 * in lines elided from this excerpt.
 */
335 static struct ctl_table cmm_table[] = {
337 .ctl_name = VM_CMM_PAGES,
338 .procname = "cmm_pages",
340 .proc_handler = &cmm_pages_handler,
343 .ctl_name = VM_CMM_TIMED_PAGES,
344 .procname = "cmm_timed_pages",
346 .proc_handler = &cmm_pages_handler,
349 .ctl_name = VM_CMM_TIMEOUT,
350 .procname = "cmm_timeout",
352 .proc_handler = &cmm_timeout_handler,
357 static struct ctl_table cmm_dir_table[] = {
369 #ifdef CONFIG_CMM_IUCV
/* All z/VM special messages for this module start with "CMM". */
370 #define SMSG_PREFIX "CMM"
/*
 * Callback for z/VM SMSG messages (registered via smsgiucv): parses
 * "CMM SHRINK <n>", "CMM RELEASE <n>" and "CMM REUSE <n> <sec>" and
 * maps them onto cmm_set_pages / cmm_add_timed_pages / cmm_set_timeout.
 * Each cmm_skip_blanks-as-condition rejects a command not followed by
 * blanks; NOTE(review): the return-on-failure lines and trailing-junk
 * checks after the final cmm_skip_blanks calls are elided here.
 */
372 cmm_smsg_target(char *msg)
/* Message must continue with blanks after the "CMM" prefix. */
376 if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
/* SHRINK <pages>: set absolute target for permanently held pages. */
378 if (strncmp(msg, "SHRINK", 6) == 0) {
379 if (!cmm_skip_blanks(msg + 6, &msg))
381 pages = cmm_strtoul(msg, &msg)
382 cmm_skip_blanks(msg, &msg);
384 cmm_set_pages(pages);
/* RELEASE <pages>: add to the timed-page target. */
385 } else if (strncmp(msg, "RELEASE", 7) == 0) {
386 if (!cmm_skip_blanks(msg + 7, &msg))
388 pages = cmm_strtoul(msg, &msg);
389 cmm_skip_blanks(msg, &msg);
391 cmm_add_timed_pages(pages);
/* REUSE <pages> <seconds>: configure the timed-release rate. */
392 } else if (strncmp(msg, "REUSE", 5) == 0) {
393 if (!cmm_skip_blanks(msg + 5, &msg))
395 pages = cmm_strtoul(msg, &msg);
396 if (!cmm_skip_blanks(msg, &msg))
398 seconds = cmm_strtoul(msg, &msg);
399 cmm_skip_blanks(msg, &msg);
401 cmm_set_timeout(pages, seconds);
/* Handle returned by register_sysctl_table, needed for unregistration
 * in the exit path. */
406 struct ctl_table_header *cmm_sysctl_header;

/* NOTE(review): module init function body -- its definition line and
 * several statements (error returns, closing #endif lines, final
 * return) are elided from this excerpt. */
413 /* Prevent logical cpu 0 from being set offline. */
414 rc = smp_get_cpu(cpumask_of_cpu(0));
416 printk(KERN_ERR "CMM: unable to reserve cpu 0\n");
419 #ifdef CONFIG_CMM_PROC
/* Publish the /proc/sys/vm/cmm_* entries (insert_at_head = 1). */
420 cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
422 #ifdef CONFIG_CMM_IUCV
/* Receive "CMM ..." special messages from z/VM. */
423 smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
/* Worker-thread plumbing: starter work item, wait queue, timer. */
425 INIT_WORK(&cmm_thread_starter, (void *) cmm_start_thread, 0);
426 init_waitqueue_head(&cmm_thread_wait);
427 init_timer(&cmm_timer);
/* NOTE(review): module exit function body -- its definition line,
 * closing #endif lines and the smp_put_cpu call are elided from this
 * excerpt.  Order: give all held pages back, then tear down the
 * sysctl/SMSG registrations. */
434 cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
435 cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
436 #ifdef CONFIG_CMM_PROC
437 unregister_sysctl_table(cmm_sysctl_header);
439 #ifdef CONFIG_CMM_IUCV
440 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
442 /* Allow logical cpu 0 to be set offline again. */
446 module_init(cmm_init);
447 module_exit(cmm_exit);

/* Exported so other kernel modules can drive CMM programmatically. */
449 EXPORT_SYMBOL(cmm_set_pages);
450 EXPORT_SYMBOL(cmm_get_pages);
451 EXPORT_SYMBOL(cmm_add_timed_pages);
452 EXPORT_SYMBOL(cmm_get_timed_pages);
453 EXPORT_SYMBOL(cmm_set_timeout);

455 MODULE_LICENSE("GPL");