1 /* include/linux/ckrm_mem_inline.h : memory control for CKRM
3 * Copyright (C) Jiantao Kong, IBM Corp. 2003
4 * (C) Shailabh Nagar, IBM Corp. 2003
5 * (C) Chandra Seetharaman, IBM Corp. 2004
8 * Memory control functions of the CKRM kernel API
10 * Latest version, more details at http://ckrm.sf.net
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
19 #ifndef _LINUX_CKRM_MEM_INLINE_H_
20 #define _LINUX_CKRM_MEM_INLINE_H_
22 #include <linux/rmap.h>
23 #include <linux/mmzone.h>
24 #include <linux/ckrm_mem.h>
27 #ifdef CONFIG_CKRM_RES_MEM
/* True iff the global ckrm_shrink_list has no ckrm zones queued on it. */
29 #define ckrm_shrink_list_empty() list_empty(&ckrm_shrink_list)
31 static inline struct ckrm_mem_res *
32 ckrm_get_mem_class(struct task_struct *tsk)
34 return ckrm_get_res_class(tsk->taskclass, mem_rcbs.resid,
39 ckrm_set_shrink(struct ckrm_zone *cz)
41 set_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
45 ckrm_test_set_shrink(struct ckrm_zone *cz)
47 return test_and_set_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
51 ckrm_clear_shrink(struct ckrm_zone *cz)
53 clear_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
57 set_page_ckrmzone( struct page *page, struct ckrm_zone *cz)
62 static inline struct ckrm_zone *
63 page_ckrmzone(struct page *page)
65 return page->ckrm_zone;
69 * Currently, a shared page that is shared by multiple classes is charged
70 * to the class with the maximum available guarantee; replace this function
 * to implement a different policy for charging shared pages.
/*
 * ckrm_mem_share_compare() - order two memory classes by how much
 * guarantee they have available, used when deciding which class a
 * shared page should be charged to (see comment above).
 * NOTE(review): this copy of the file is missing lines here — the NULL
 * checks and the return statements of the DONTCARE branches — so the
 * exact ordering values cannot be confirmed from this view.
 */
74 ckrm_mem_share_compare(struct ckrm_mem_res *a, struct ckrm_mem_res *b)
	/* Equal guarantees: decision falls through to the pg_unused compare. */
80 if (a->pg_guar == b->pg_guar)
	/* CKRM_SHARE_DONTCARE means the class has no guarantee of its own. */
82 if (a->pg_guar == CKRM_SHARE_DONTCARE)
84 if (b->pg_guar == CKRM_SHARE_DONTCARE)
	/* Order by the difference in unused guarantee pages. */
86 return (a->pg_unused - b->pg_unused);
/*
 * incr_use_count() - charge one more page to @cls, recursing to the
 * parent class (as a borrow) when this class has no guarantee of its
 * own or has exhausted its unused guarantee.
 * NOTE(review): several lines are missing from this copy — the code
 * between the pg_total increment and the parent re-lookup (presumably
 * the @borrow bookkeeping) and the else-bracketing around the
 * ckrm_mem_real_count increment — consult the full source before editing.
 */
90 incr_use_count(struct ckrm_mem_res *cls, int borrow)
92 extern int ckrm_mem_shrink_at;
93 struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
94 mem_rcbs.resid, struct ckrm_mem_res);
	/* Total pages charged to this class. */
99 atomic_inc(&cls->pg_total);
103 parcls = ckrm_get_res_class(cls->parent,
104 mem_rcbs.resid, struct ckrm_mem_res);
	/*
	 * Either no guarantee of our own (DONTCARE) or already past the
	 * unused guarantee: charge the excess page to the parent as a borrow.
	 */
105 if (parcls && ((cls->pg_guar == CKRM_SHARE_DONTCARE) ||
106 (atomic_read(&cls->pg_total) > cls->pg_unused))) {
107 incr_use_count(parcls, 1);
	/* Otherwise the page counts against real (non-borrowed) usage. */
110 atomic_inc(&ckrm_mem_real_count);
	/*
	 * Crossed the shrink threshold (ckrm_mem_shrink_at percent of the
	 * page limit) and not already flagged at-limit: nudge the backend.
	 */
112 if ((cls->pg_limit != CKRM_SHARE_DONTCARE) &&
113 (atomic_read(&cls->pg_total) >=
114 ((ckrm_mem_shrink_at * cls->pg_limit) / 100)) &&
115 ((cls->flags & CLS_AT_LIMIT) != CLS_AT_LIMIT)) {
116 ckrm_shrink_atlimit(cls);
/*
 * decr_use_count() - inverse of incr_use_count(): un-charge one page
 * from @cls, returning a borrowed page to the parent when applicable.
 * NOTE(review): lines are missing from this copy — the handling of
 * @borrowed and the branch structure deciding between the parent
 * give-back and the real-count decrement — verify against the full
 * source before editing.
 */
122 decr_use_count(struct ckrm_mem_res *cls, int borrowed)
126 atomic_dec(&cls->pg_total);
	/* Pages were borrowed from the parent: give one back first. */
129 if (cls->pg_borrowed > 0) {
130 struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
131 mem_rcbs.resid, struct ckrm_mem_res);
133 decr_use_count(parcls, 1);
	/* Otherwise this page was real (non-borrowed) usage. */
138 atomic_dec(&ckrm_mem_real_count);
/*
 * ckrm_set_page_class() - attach @page to @cls's per-zone ckrm
 * accounting: take a reference on the class, charge the page to it and
 * mark the page as accounted.
 * NOTE(review): guard/bracket lines are missing from this copy (the
 * early-return path when the root class is absent, and the checks
 * around old_czone before the kref_put) — verify against the full
 * source before editing.
 */
142 ckrm_set_page_class(struct page *page, struct ckrm_mem_res *cls)
144 struct ckrm_zone *new_czone, *old_czone;
	/* Controller not initialized: leave the page unclassified. */
147 if (!ckrm_mem_root_class) {
148 set_page_ckrmzone(page, NULL);
	/* Presumably reached when @cls is NULL — TODO confirm vs full source. */
151 cls = ckrm_mem_root_class;
153 new_czone = &cls->ckrm_zone[page_zonenum(page)];
154 old_czone = page_ckrmzone(page);
	/* Drop the reference held through the page's previous class. */
157 kref_put(&old_czone->memcls->nr_users, memclass_release);
159 set_page_ckrmzone(page, new_czone);
	/* Pin the new class and charge the page to it. */
160 kref_get(&cls->nr_users);
161 incr_use_count(cls, 0);
162 SetPageCkrmAccount(page);
/*
 * ckrm_change_page_class() - move @page, its accounting, its class
 * reference and its LRU linkage from its current memory class to
 * @newcls.
 * NOTE(review): this copy is missing the guard/bracket lines (early
 * returns, the checks around oldcls/newcls and PageCkrmAccount) —
 * verify against the full source before editing.
 */
166 ckrm_change_page_class(struct page *page, struct ckrm_mem_res *newcls)
168 struct ckrm_zone *old_czone = page_ckrmzone(page), *new_czone;
169 struct ckrm_mem_res *oldcls;
172 if (!ckrm_mem_root_class)
	/* Presumably a fallback when @newcls is NULL — TODO confirm. */
174 newcls = ckrm_mem_root_class;
177 oldcls = old_czone->memcls;
	/* Nothing to do if the page already belongs to the target class. */
178 if (oldcls == newcls)
	/* Uncharge the old class and drop its reference. */
182 kref_put(&oldcls->nr_users, memclass_release);
183 decr_use_count(oldcls, 0);
	/* Charge the new class and repoint the page's ckrm zone. */
186 new_czone = &newcls->ckrm_zone[page_zonenum(page)];
187 set_page_ckrmzone(page, new_czone);
188 kref_get(&newcls->nr_users);
189 incr_use_count(newcls, 0);
	/* Move the page between per-class LRU lists, keeping counters in sync. */
191 list_del(&page->lru);
192 if (PageActive(page)) {
193 old_czone->nr_active--;
194 new_czone->nr_active++;
195 list_add(&page->lru, &new_czone->active_list);
197 old_czone->nr_inactive--;
198 new_czone->nr_inactive++;
199 list_add(&page->lru, &new_czone->inactive_list);
/*
 * ckrm_clear_page_class() - detach @page from its ckrm zone: un-charge
 * it if it was accounted, drop the class reference and clear the
 * page's back-pointer.
 * NOTE(review): missing lines here likely include a NULL check on
 * czone and the closing bracket of the if — verify against the full
 * source.
 */
204 ckrm_clear_page_class(struct page *page)
206 struct ckrm_zone *czone = page_ckrmzone(page);
	/* Only pages flagged as accounted were charged via incr_use_count(). */
208 if (PageCkrmAccount(page)) {
209 decr_use_count(czone->memcls, 0);
210 ClearPageCkrmAccount(page);
212 kref_put(&czone->memcls->nr_users, memclass_release);
213 set_page_ckrmzone(page, NULL);
/*
 * ckrm_mem_inc_active() - classify @page to the current task's memory
 * class (root class as fallback) and put it on that class's per-zone
 * active list.
 * NOTE(review): lines between the declarations and
 * ckrm_set_page_class() are missing in this copy (likely a NULL-class
 * early return), as is the czone->nr_active++ update that the
 * inc_inactive twin suggests — verify against the full source.
 */
218 ckrm_mem_inc_active(struct page *page)
220 struct ckrm_mem_res *cls = ckrm_get_mem_class(current)
221 ?: ckrm_mem_root_class;
222 struct ckrm_zone *czone;
227 ckrm_set_page_class(page, cls);
228 czone = page_ckrmzone(page);
230 list_add(&page->lru, &czone->active_list);
/*
 * ckrm_mem_dec_active() - take @page off its class's active list and
 * drop its classification/accounting.
 * NOTE(review): missing lines likely include a czone NULL check and
 * the czone->nr_active-- update — verify against the full source.
 */
234 ckrm_mem_dec_active(struct page *page)
236 struct ckrm_zone *czone = page_ckrmzone(page);
240 list_del(&page->lru);
242 ckrm_clear_page_class(page);
/*
 * ckrm_mem_inc_inactive() - classify @page to the current task's
 * memory class (root class as fallback) and put it on that class's
 * per-zone inactive list, bumping the inactive counter.
 * NOTE(review): lines between the declarations and
 * ckrm_set_page_class() are missing in this copy (likely a NULL-class
 * early return) — verify against the full source.
 */
247 ckrm_mem_inc_inactive(struct page *page)
249 struct ckrm_mem_res *cls = ckrm_get_mem_class(current)
250 ?: ckrm_mem_root_class;
251 struct ckrm_zone *czone;
256 ckrm_set_page_class(page, cls);
257 czone = page_ckrmzone(page);
258 czone->nr_inactive++;
259 list_add(&page->lru, &czone->inactive_list);
/*
 * ckrm_mem_dec_inactive() - take @page off its class's inactive list
 * and drop its classification/accounting.
 * NOTE(review): a couple of lines after the declaration are missing in
 * this copy (likely a czone NULL check) — verify against the full
 * source.
 */
263 ckrm_mem_dec_inactive(struct page *page)
265 struct ckrm_zone *czone = page_ckrmzone(page);
269 czone->nr_inactive--;
270 list_del(&page->lru);
271 ckrm_clear_page_class(page);
275 ckrm_zone_add_active(struct ckrm_zone *czone, int cnt)
277 czone->nr_active += cnt;
281 ckrm_zone_add_inactive(struct ckrm_zone *czone, int cnt)
283 czone->nr_inactive += cnt;
287 ckrm_zone_sub_active(struct ckrm_zone *czone, int cnt)
289 czone->nr_active -= cnt;
293 ckrm_zone_sub_inactive(struct ckrm_zone *czone, int cnt)
295 czone->nr_inactive -= cnt;
299 ckrm_class_limit_ok(struct ckrm_mem_res *cls)
303 if ((mem_rcbs.resid == -1) || !cls) {
306 if (cls->pg_limit == CKRM_SHARE_DONTCARE) {
307 struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
308 mem_rcbs.resid, struct ckrm_mem_res);
309 ret = (parcls ? ckrm_class_limit_ok(parcls) : 0);
311 ret = (atomic_read(&cls->pg_total) <= cls->pg_limit);
313 /* If we are failing, just nudge the back end */
315 ckrm_shrink_atlimit(cls);
321 ckrm_page_init(struct page *page)
323 page->flags &= ~(1 << PG_ckrm_account);
324 set_page_ckrmzone(page, NULL);
328 /* task/mm initializations/cleanup */
331 ckrm_task_mm_init(struct task_struct *tsk)
333 INIT_LIST_HEAD(&tsk->mm_peers);
337 ckrm_task_mm_set(struct mm_struct * mm, struct task_struct *task)
339 spin_lock(&mm->peertask_lock);
340 if (!list_empty(&task->mm_peers)) {
341 printk(KERN_ERR "MEM_RC: Task list NOT empty!! emptying...\n");
342 list_del_init(&task->mm_peers);
344 list_add_tail(&task->mm_peers, &mm->tasklist);
345 spin_unlock(&mm->peertask_lock);
346 if (mm->memclass != ckrm_get_mem_class(task))
347 ckrm_mem_migrate_mm(mm, NULL);
352 ckrm_task_mm_change(struct task_struct *tsk,
353 struct mm_struct *oldmm, struct mm_struct *newmm)
356 spin_lock(&oldmm->peertask_lock);
357 list_del(&tsk->mm_peers);
358 ckrm_mem_migrate_mm(oldmm, NULL);
359 spin_unlock(&oldmm->peertask_lock);
361 spin_lock(&newmm->peertask_lock);
362 list_add_tail(&tsk->mm_peers, &newmm->tasklist);
363 ckrm_mem_migrate_mm(newmm, NULL);
364 spin_unlock(&newmm->peertask_lock);
368 ckrm_task_mm_clear(struct task_struct *tsk, struct mm_struct *mm)
370 spin_lock(&mm->peertask_lock);
371 list_del_init(&tsk->mm_peers);
372 ckrm_mem_migrate_mm(mm, NULL);
373 spin_unlock(&mm->peertask_lock);
377 ckrm_mm_init(struct mm_struct *mm)
379 INIT_LIST_HEAD(&mm->tasklist);
380 mm->peertask_lock = SPIN_LOCK_UNLOCKED;
384 ckrm_mm_setclass(struct mm_struct *mm, struct ckrm_mem_res *cls)
387 kref_get(&cls->nr_users);
/*
 * ckrm_mm_clearclass() - drop the mm's reference on its memory class.
 * NOTE(review): surrounding lines are missing in this copy — the full
 * source presumably guards against mm->memclass being NULL and resets
 * the pointer after the kref_put — verify before editing.
 */
391 ckrm_mm_clearclass(struct mm_struct *mm)
394 kref_put(&mm->memclass->nr_users, memclass_release);
/*
 * With the controller enabled, pages live on per-ckrm-zone LRU lists
 * (see list_add calls above), so there is nothing to set up on the
 * struct zone itself; compare the !CONFIG_CKRM_RES_MEM variant below.
 */
399 static inline void ckrm_init_lists(struct zone *zone) {}
401 static inline void ckrm_add_tail_inactive(struct page *page)
403 struct ckrm_zone *ckrm_zone = page_ckrmzone(page);
404 list_add_tail(&page->lru, &ckrm_zone->inactive_list);
/*
 * Stubs for kernels built without CONFIG_CKRM_RES_MEM: the memory
 * controller compiles away and callers see no-ops or permissive
 * defaults.
 */
409 #define ckrm_shrink_list_empty() (1)
/*
 * NOTE(review): only the parameter line of this stub survives in this
 * copy; its return type and body are missing.  Its name also differs
 * from ckrm_get_mem_class() in the enabled branch — verify callers
 * against the full source.
 */
412 ckrm_get_memclass(struct task_struct *tsk)
417 static inline void ckrm_clear_page_class(struct page *p) {}
419 static inline void ckrm_mem_inc_active(struct page *p) {}
420 static inline void ckrm_mem_dec_active(struct page *p) {}
421 static inline void ckrm_mem_inc_inactive(struct page *p) {}
422 static inline void ckrm_mem_dec_inactive(struct page *p) {}
424 #define ckrm_zone_add_active(a, b) do {} while (0)
425 #define ckrm_zone_add_inactive(a, b) do {} while (0)
426 #define ckrm_zone_sub_active(a, b) do {} while (0)
427 #define ckrm_zone_sub_inactive(a, b) do {} while (0)
/* Without the controller every class is always within its limit. */
429 #define ckrm_class_limit_ok(a) (1)
/* No-op task/mm hooks when the controller is compiled out. */
431 static inline void ckrm_page_init(struct page *p) {}
432 static inline void ckrm_task_mm_init(struct task_struct *tsk) {}
433 static inline void ckrm_task_mm_set(struct mm_struct * mm,
434 struct task_struct *task) {}
435 static inline void ckrm_task_mm_change(struct task_struct *tsk,
436 struct mm_struct *oldmm, struct mm_struct *newmm) {}
437 static inline void ckrm_task_mm_clear(struct task_struct *tsk,
438 struct mm_struct *mm) {}
440 static inline void ckrm_mm_init(struct mm_struct *mm) {}
442 /* using #define instead of static inline as the prototype requires *
443 * data structures that are available only with the controller enabled */
444 #define ckrm_mm_setclass(a, b) do {} while(0)
446 static inline void ckrm_mm_clearclass(struct mm_struct *mm) {}
448 static inline void ckrm_init_lists(struct zone *zone)
450 INIT_LIST_HEAD(&zone->active_list);
451 INIT_LIST_HEAD(&zone->inactive_list);
454 static inline void ckrm_add_tail_inactive(struct page *page)
456 struct zone *zone = page_zone(page);
457 list_add_tail(&page->lru, &zone->inactive_list);
460 #endif /* _LINUX_CKRM_MEM_INLINE_H_ */