/* include/linux/ckrm_mem_inline.h : memory control for CKRM
 *
 * Copyright (C) Jiantao Kong, IBM Corp. 2003
 *           (C) Shailabh Nagar, IBM Corp. 2003
 *           (C) Chandra Seetharaman, IBM Corp. 2004
 *
 * Memory control functions of the CKRM kernel API
 *
 * Latest version, more details at http://ckrm.sf.net
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
19 #ifndef _LINUX_CKRM_MEM_INLINE_H_
20 #define _LINUX_CKRM_MEM_INLINE_H_
22 #include <linux/rmap.h>
23 #include <linux/mmzone.h>
24 #include <linux/ckrm_mem.h>
27 #ifdef CONFIG_CKRM_RES_MEM
32 static inline struct ckrm_mem_res *
33 ckrm_get_mem_class(struct task_struct *tsk)
35 return ckrm_get_res_class(tsk->taskclass, mem_rcbs.resid,
39 #define ckrm_shrink_list_empty() list_empty(&ckrm_shrink_list)
42 ckrm_set_shrink(struct ckrm_zone *cz)
44 set_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
48 ckrm_test_set_shrink(struct ckrm_zone *cz)
50 return test_and_set_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
54 ckrm_clear_shrink(struct ckrm_zone *cz)
56 clear_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
/*
 * Currently, a shared page that is shared by multiple classes is charged
 * to a class with max available guarantee. Simply replace this function
 * to implement a different sharing policy.
 */
/*
 * Order two classes by "who should be charged for a shared page":
 * a class with a real guarantee beats DONTCARE; ties break on the
 * larger unused guarantee (positive result favours @a).
 * NOTE(review): the opening brace, the initial NULL-pointer handling
 * and the return statements of the three if-cases below were lost in
 * extraction — restore from upstream before building.
 */
65 ckrm_mem_share_compare(struct ckrm_mem_res *a, struct ckrm_mem_res *b)
71 	if (a->pg_guar == b->pg_guar)
73 	if (a->pg_guar == CKRM_SHARE_DONTCARE)
75 	if (b->pg_guar == CKRM_SHARE_DONTCARE)
	/* tie-break: compare unused guarantee */
77 	return (a->pg_unused - b->pg_unused);
/*
 * Charge one page to @cls.  When the class has no guarantee
 * (CKRM_SHARE_DONTCARE) or is over its unused guarantee, the page is
 * borrowed from the parent class (recursively, with borrow=1);
 * otherwise it counts against the global real-page counter.
 * NOTE(review): several lines were lost in extraction (NULL check,
 * the else branch pairing, closing braces and the ckrm_at_limit()
 * call at the end); @borrow is unused in the visible code — verify
 * against upstream.
 */
81 incr_use_count(struct ckrm_mem_res *cls, int borrow)
83 	extern int ckrm_mem_shrink_at;
86 	BUG_ON(!ckrm_memclass_valid(cls));
87 	atomic_inc(&cls->pg_total);
	/* over guarantee (or no guarantee): charge the parent as borrowed */
91 	if ((cls->pg_guar == CKRM_SHARE_DONTCARE) ||
92 			(atomic_read(&cls->pg_total) > cls->pg_unused)) {
93 		struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
94 				mem_rcbs.resid, struct ckrm_mem_res);
96 		incr_use_count(parcls, 1);
100 	atomic_inc(&ckrm_mem_real_count);
	/* reached ckrm_mem_shrink_at percent of the limit and not already
	 * flagged MEM_AT_LIMIT: presumably triggers shrinking below */
102 	if (unlikely((cls->pg_limit != CKRM_SHARE_DONTCARE) &&
103 			(atomic_read(&cls->pg_total) >=
104 			((ckrm_mem_shrink_at * cls->pg_limit) / 100)) &&
105 			((cls->flags & MEM_AT_LIMIT) != MEM_AT_LIMIT))) {
/*
 * Uncharge one page from @cls; if the class has pages borrowed from
 * its parent, return one to the parent first (recursively, with
 * borrowed=1), otherwise decrement the global real-page counter.
 * NOTE(review): lines lost in extraction (NULL check, pg_borrowed
 * decrement, closing braces); @borrowed is unused in the visible
 * code — verify against upstream.
 */
112 decr_use_count(struct ckrm_mem_res *cls, int borrowed)
116 	BUG_ON(!ckrm_memclass_valid(cls));
117 	atomic_dec(&cls->pg_total);
	/* give a borrowed page back to the parent class first */
120 	if (cls->pg_borrowed > 0) {
121 		struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
122 				mem_rcbs.resid, struct ckrm_mem_res);
124 		decr_use_count(parcls, 1);
129 	atomic_dec(&ckrm_mem_real_count);
/*
 * Point @page at the per-zone LRU bookkeeping of @cls (falling back
 * to the root class when @cls is NULL) and take a reference on the
 * class.  With no class available the page stays unaccounted.
 * NOTE(review): brace/else lines were lost in extraction.
 * NOTE(review): the kref_put() below drops a reference via the NEW
 * class (&cls->nr_users) while the page's PREVIOUS zone is being
 * replaced — looks like it should target the old zone's memcls;
 * verify against upstream.
 */
133 ckrm_set_page_class(struct page *page, struct ckrm_mem_res *cls)
135 	if (unlikely(cls == NULL)) {
136 		cls = ckrm_mem_root_class;
138 	if (likely(cls != NULL)) {
139 		struct ckrm_zone *czone = &cls->ckrm_zone[page_zonenum(page)];
		/* page was already attached to a zone: release that ref */
140 		if (unlikely(page->ckrm_zone)) {
141 			kref_put(&cls->nr_users, memclass_release);
143 		page->ckrm_zone = czone;
144 		kref_get(&cls->nr_users);
		/* no class at all: leave the page unaccounted */
146 		page->ckrm_zone = NULL;
151 ckrm_set_pages_class(struct page *pages, int numpages, struct ckrm_mem_res *cls)
154 for (i = 0; i < numpages; pages++, i++) {
155 ckrm_set_page_class(pages, cls);
/*
 * Detach @page from its ckrm zone: uncharge it if it was accounted
 * (CkrmAccount flag set), drop the class reference and clear the
 * back-pointer.  No-op when the page has no zone.
 * NOTE(review): closing-brace lines were lost in extraction.
 */
160 ckrm_clear_page_class(struct page *page)
162 	if (likely(page->ckrm_zone != NULL)) {
163 		if (CkrmAccount(page)) {
164 			decr_use_count(page->ckrm_zone->memcls, 0);
165 			ClearCkrmAccount(page);
167 		kref_put(&page->ckrm_zone->memcls->nr_users, memclass_release);
168 		page->ckrm_zone = NULL;
/*
 * Re-account an LRU page from its current class to @newcls: move the
 * charge (decr on old, incr on new), swap the kref, and move the page
 * between the matching per-class active/inactive LRU lists, keeping
 * the nr_active/nr_inactive counters in step.
 * NOTE(review): braces and the early-return statements after the two
 * leading checks were lost in extraction — restore from upstream.
 */
173 ckrm_change_page_class(struct page *page, struct ckrm_mem_res *newcls)
175 	struct ckrm_zone *old_czone = page->ckrm_zone, *new_czone;
176 	struct ckrm_mem_res *oldcls;
	/* unaccounted page or no target class: nothing to move (early out) */
178 	if (unlikely(!old_czone || !newcls)) {
179 		BUG_ON(CkrmAccount(page));
182 	BUG_ON(!CkrmAccount(page));
184 	oldcls = old_czone->memcls;
	/* already in the right class (or orphaned): presumably early out */
185 	if (oldcls == NULL || (oldcls == newcls))
	/* release old class: drop ref and uncharge */
188 	kref_put(&oldcls->nr_users, memclass_release);
189 	decr_use_count(oldcls, 0);
	/* attach to the new class's zone, take ref, charge */
191 	page->ckrm_zone = new_czone = &newcls->ckrm_zone[page_zonenum(page)];
193 	kref_get(&newcls->nr_users);
194 	incr_use_count(newcls, 0);
	/* migrate between the per-class LRU lists, preserving active state */
196 	list_del(&page->lru);
197 	if (PageActive(page)) {
198 		old_czone->nr_active--;
199 		new_czone->nr_active++;
200 		list_add(&page->lru, &new_czone->active_list);
202 		old_czone->nr_inactive--;
203 		new_czone->nr_inactive++;
204 		list_add(&page->lru, &new_czone->inactive_list);
/*
 * Account a page entering the zone active list: charge it to the
 * current task's memory class (root class as fallback), flag it as
 * accounted, and put it on that class's per-zone active LRU.
 * NOTE(review): lines lost in extraction around the checks (braces /
 * a NULL-class early return); restore from upstream.
 */
209 ckrm_mem_inc_active(struct page *page)
211 	struct ckrm_mem_res *cls = ckrm_get_mem_class(current) ?: ckrm_mem_root_class;
	/* the page must not be accounted or attached yet */
215 	BUG_ON(CkrmAccount(page));
216 	BUG_ON(page->ckrm_zone != NULL);
218 	ckrm_set_page_class(page, cls);
219 	incr_use_count(cls, 0);
220 	SetCkrmAccount(page);
221 	BUG_ON(page->ckrm_zone == NULL);
222 	page->ckrm_zone->nr_active++;
223 	list_add(&page->lru, &page->ckrm_zone->active_list);
/*
 * Account a page leaving the zone active list: unlink it from the
 * class's active LRU, drop the count, and uncharge the class.
 * NOTE(review): the early-return after the NULL check and the braces
 * were lost in extraction.
 */
227 ckrm_mem_dec_active(struct page *page)
	/* unaccounted page: presumably returns immediately */
229 	if (page->ckrm_zone == NULL)
231 	BUG_ON(page->ckrm_zone->memcls == NULL);
232 	BUG_ON(!CkrmAccount(page));
234 	list_del(&page->lru);
235 	page->ckrm_zone->nr_active--;
236 	ckrm_clear_page_class(page);
/*
 * Account a page entering the zone inactive list: charge it to the
 * current task's memory class (root class as fallback), flag it as
 * accounted, and put it on that class's per-zone inactive LRU.
 * Mirrors ckrm_mem_inc_active().
 * NOTE(review): lines lost in extraction around the checks (braces /
 * a NULL-class early return); restore from upstream.
 */
241 ckrm_mem_inc_inactive(struct page *page)
243 	struct ckrm_mem_res *cls = ckrm_get_mem_class(current) ?: ckrm_mem_root_class;
	/* the page must not be accounted or attached yet */
247 	BUG_ON(CkrmAccount(page));
248 	BUG_ON(page->ckrm_zone != NULL);
250 	ckrm_set_page_class(page, cls);
251 	incr_use_count(cls, 0);
252 	SetCkrmAccount(page);
253 	BUG_ON(page->ckrm_zone == NULL);
254 	page->ckrm_zone->nr_inactive++;
255 	list_add(&page->lru, &page->ckrm_zone->inactive_list);
/*
 * Account a page leaving the zone inactive list: drop the count,
 * unlink it from the class's inactive LRU, and uncharge the class.
 * Mirrors ckrm_mem_dec_active().
 * NOTE(review): the early-return after the NULL check and the braces
 * were lost in extraction.
 */
259 ckrm_mem_dec_inactive(struct page *page)
	/* unaccounted page: presumably returns immediately */
261 	if (page->ckrm_zone == NULL)
263 	BUG_ON(page->ckrm_zone->memcls == NULL);
264 	BUG_ON(!CkrmAccount(page));
266 	page->ckrm_zone->nr_inactive--;
267 	list_del(&page->lru);
268 	ckrm_clear_page_class(page);
/*
 * Return nonzero when @cls may be charged another page: either the
 * memory controller is inactive / the class is NULL, or the class is
 * within ckrm_mem_fail_over percent of its page limit.  A DONTCARE
 * limit defers to the parent class.
 * NOTE(review): the declaration of 'ret', the early return for the
 * first check, the else pairing, the final return, and the code under
 * the "nudge the back end" comment were lost in extraction.
 */
272 ckrm_class_limit_ok(struct ckrm_mem_res *cls)
275 	extern int ckrm_mem_fail_over;
	/* controller not registered, or no class: presumably always ok */
277 	if ((mem_rcbs.resid == -1) || !cls) {
	/* no limit of its own: ask the parent class */
280 	if (cls->pg_limit == CKRM_SHARE_DONTCARE) {
281 		struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
282 				mem_rcbs.resid, struct ckrm_mem_res);
283 		ret = (parcls ? ckrm_class_limit_ok(parcls) : 0);
285 		ret = (atomic_read(&cls->pg_total) <=
286 			((ckrm_mem_fail_over * cls->pg_limit) / 100));
290 	// if we are failing... just nudge the back end
/* task/mm initialization and cleanup helpers */
299 ckrm_task_mm_init(struct task_struct *tsk)
301 INIT_LIST_HEAD(&tsk->mm_peers);
305 ckrm_task_change_mm(struct task_struct *tsk, struct mm_struct *oldmm, struct mm_struct *newmm)
308 spin_lock(&oldmm->peertask_lock);
309 list_del(&tsk->mm_peers);
310 ckrm_mem_evaluate_mm(oldmm, NULL);
311 spin_unlock(&oldmm->peertask_lock);
313 spin_lock(&newmm->peertask_lock);
314 list_add_tail(&tsk->mm_peers, &newmm->tasklist);
315 ckrm_mem_evaluate_mm(newmm, NULL);
316 spin_unlock(&newmm->peertask_lock);
320 ckrm_task_clear_mm(struct task_struct *tsk, struct mm_struct *mm)
322 spin_lock(&mm->peertask_lock);
323 list_del_init(&tsk->mm_peers);
324 ckrm_mem_evaluate_mm(mm, NULL);
325 spin_unlock(&mm->peertask_lock);
329 ckrm_mm_init(struct mm_struct *mm)
331 INIT_LIST_HEAD(&mm->tasklist);
332 mm->peertask_lock = SPIN_LOCK_UNLOCKED;
336 ckrm_mm_setclass(struct mm_struct *mm, struct ckrm_mem_res *cls)
339 kref_get(&cls->nr_users);
343 ckrm_mm_clearclass(struct mm_struct *mm)
346 kref_put(&mm->memclass->nr_users, memclass_release);
352 ckrm_zone_inc_active(struct ckrm_zone *czone, int cnt)
354 czone->nr_active += cnt;
358 ckrm_zone_inc_inactive(struct ckrm_zone *czone, int cnt)
360 czone->nr_inactive += cnt;
364 ckrm_zone_dec_active(struct ckrm_zone *czone, int cnt)
366 czone->nr_active -= cnt;
370 ckrm_zone_dec_inactive(struct ckrm_zone *czone, int cnt)
372 czone->nr_inactive -= cnt;
375 #else // !CONFIG_CKRM_RES_MEM
/*
 * CONFIG_CKRM_RES_MEM disabled: every hook compiles away.  Statement
 * hooks become empty do{}while(0) bodies; predicate hooks report
 * "list empty" / "limit ok" / "no kick" so callers proceed normally.
 * (Stray line-number prefixes from the damaged extraction removed.)
 */
#define ckrm_set_page_class(a,b)	do{}while(0)
#define ckrm_set_pages_class(a,b,c)	do{}while(0)
#define ckrm_clear_page_class(a)	do{}while(0)
#define ckrm_clear_pages_class(a,b)	do{}while(0)
#define ckrm_change_page_class(a,b)	do{}while(0)
#define ckrm_change_pages_class(a,b,c)	do{}while(0)
#define ckrm_mem_inc_active(a)		do{}while(0)
#define ckrm_mem_dec_active(a)		do{}while(0)
#define ckrm_mem_inc_inactive(a)	do{}while(0)
#define ckrm_mem_dec_inactive(a)	do{}while(0)
#define ckrm_shrink_list_empty()	(1)
#define ckrm_kick_page(a,b)		(0)
#define ckrm_class_limit_ok(a)		(1)
#define ckrm_task_mm_init(a)		do{}while(0)
#define ckrm_task_clear_mm(a, b)	do{}while(0)
#define ckrm_task_change_mm(a, b, c)	do{}while(0)
#define ckrm_mm_init(a)			do{}while(0)
#define ckrm_mm_setclass(a, b)		do{}while(0)
#define ckrm_mm_clearclass(a)		do{}while(0)
#define ckrm_zone_inc_active(a, b)	do{}while(0)
#define ckrm_zone_inc_inactive(a, b)	do{}while(0)
#define ckrm_zone_dec_active(a, b)	do{}while(0)
#define ckrm_zone_dec_inactive(a, b)	do{}while(0)
401 #endif // CONFIG_CKRM_RES_MEM
403 #endif // _LINUX_CKRM_MEM_INLINE_H_