/* include/linux/ckrm_mem_inline.h : memory control for CKRM
 *
 * Copyright (C) Jiantao Kong, IBM Corp. 2003
 *           (C) Shailabh Nagar, IBM Corp. 2003
 *           (C) Chandra Seetharaman, IBM Corp. 2004
 *
 * Memory control functions of the CKRM kernel API
 *
 * Latest version, more details at http://ckrm.sf.net
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
26 #ifndef _LINUX_CKRM_MEM_INLINE_H_
27 #define _LINUX_CKRM_MEM_INLINE_H_
29 #include <linux/rmap.h>
30 #include <linux/mmzone.h>
31 #include <linux/ckrm_mem.h>
34 #ifdef CONFIG_CKRM_RES_MEM
/*
 * Return the memory resource class attached to @tsk's task class,
 * or NULL when no memory controller is bound to it.
 */
#define GET_MEM_CLASS(tsk) \
	ckrm_get_res_class(tsk->taskclass, mem_rcbs.resid, ckrm_mem_res_t)

/*
 * Mark / test-and-mark / unmark a class as queued for shrinking.
 * The flag lives in cls->reclaim_flags; the bitops want an
 * unsigned long *, hence the cast.
 */
#define ckrm_set_shrink(cls) \
	set_bit(CLS_SHRINK_BIT, (unsigned long *)&(cls)->reclaim_flags)
#define ckrm_test_set_shrink(cls) \
	test_and_set_bit(CLS_SHRINK_BIT, (unsigned long *)&(cls)->reclaim_flags)
#define ckrm_clear_shrink(cls) \
	clear_bit(CLS_SHRINK_BIT, (unsigned long *)&(cls)->reclaim_flags)

/* True when no class is currently queued for shrinking. */
#define ckrm_shrink_list_empty()	list_empty(&ckrm_shrink_list)
49 * Currently, the class of an address is assigned to the class with max
50 * available guarantee. Simply replace this function for other policies.
53 ckrm_mem_share_compare(ckrm_mem_res_t *a, ckrm_mem_res_t *b)
59 return (a->pg_unused - b->pg_unused);
63 mem_class_get(ckrm_mem_res_t *cls)
66 atomic_inc(&((cls)->nr_users));
70 mem_class_put(ckrm_mem_res_t *cls)
72 if (cls && atomic_dec_and_test(&(cls->nr_users)) ) {
73 printk("freeing memclass %p of <core:%s>\n", cls, cls->core->name);
79 incr_use_count(ckrm_mem_res_t *cls, int borrow)
83 atomic_inc(&cls->pg_total);
84 over_limit = (atomic_read(&cls->pg_total) > ((9 * cls->pg_limit) / 10));
88 if ((cls->pg_guar != CKRM_SHARE_DONTCARE) &&
89 (atomic_read(&cls->pg_total) > cls->pg_unused)) {
90 ckrm_mem_res_t *parcls = ckrm_get_res_class(cls->parent,
91 mem_rcbs.resid, ckrm_mem_res_t);
93 over_limit |= incr_use_count(parcls, 1);
98 atomic_inc(&ckrm_mem_real_count);
103 decr_use_count(ckrm_mem_res_t *cls, int borrowed)
105 atomic_dec(&cls->pg_total);
108 if (cls->pg_borrowed > 0) {
109 ckrm_mem_res_t *parcls = ckrm_get_res_class(cls->parent,
110 mem_rcbs.resid, ckrm_mem_res_t);
112 decr_use_count(parcls, 1);
117 atomic_dec(&ckrm_mem_real_count);
121 ckrm_set_page_class(struct page *page, ckrm_mem_res_t *cls)
123 if (mem_rcbs.resid != -1 && cls != NULL) {
124 if (unlikely(page->memclass)) {
125 mem_class_put(page->memclass);
127 page->memclass = cls;
130 page->memclass = NULL;
135 ckrm_set_pages_class(struct page *pages, int numpages, ckrm_mem_res_t *cls)
138 for (i = 0; i < numpages; pages++, i++) {
139 ckrm_set_page_class(pages, cls);
144 ckrm_clear_page_class(struct page *page)
146 if (page->memclass != NULL) {
147 mem_class_put(page->memclass);
148 page->memclass = NULL;
153 ckrm_clear_pages_class(struct page *pages, int numpages)
156 for (i = 0; i < numpages; pages++, i++) {
157 ckrm_clear_page_class(pages);
162 ckrm_change_page_class(struct page *page, ckrm_mem_res_t *cls)
164 ckrm_clear_page_class(page);
165 ckrm_set_page_class(page, cls);
169 ckrm_change_pages_class(struct page *pages, int numpages,
173 for (i = 0; i < numpages; pages++, i++) {
174 ckrm_change_page_class(pages, cls);
179 ckrm_mem_inc_active(struct page *page)
181 ckrm_mem_res_t *cls = page_class(page);
183 cls->nr_active[page_zonenum(page)]++;
184 if (incr_use_count(cls, 0)) {
185 ckrm_near_limit(cls);
190 ckrm_mem_dec_active(struct page *page)
192 ckrm_mem_res_t *cls = page_class(page);
194 cls->nr_active[page_zonenum(page)]--;
195 decr_use_count(cls, 0);
199 ckrm_mem_inc_inactive(struct page *page)
201 ckrm_mem_res_t *cls = page_class(page);
203 cls->nr_inactive[page_zonenum(page)]++;
204 if (incr_use_count(cls, 0) &&
205 ((cls->flags & MEM_NEAR_LIMIT) != MEM_NEAR_LIMIT)) {
206 ckrm_near_limit(cls);
211 ckrm_mem_dec_inactive(struct page *page)
213 ckrm_mem_res_t *cls = page_class(page);
215 cls->nr_inactive[page_zonenum(page)]--;
216 decr_use_count(cls, 0);
220 ckrm_kick_page(struct page *page, unsigned int bits)
222 if (page_class(page) == NULL) {
225 return (page_class(page)->reclaim_flags & bits);
230 ckrm_class_limit_ok(ckrm_mem_res_t *cls)
232 if ((mem_rcbs.resid == -1) || !cls) {
235 return (atomic_read(&cls->pg_total) <= (11 * cls->pg_limit) / 10);
238 #else // !CONFIG_CKRM_RES_MEM
/*
 * Memory controller disabled: all accounting hooks compile away to
 * no-ops, the shrink list is always "empty", no page is ever kicked,
 * and every class is within its limit.
 */
#define ckrm_set_page_class(a,b)		do{}while(0)
#define ckrm_set_pages_class(a,b,c)		do{}while(0)
#define ckrm_clear_page_class(a)		do{}while(0)
#define ckrm_clear_pages_class(a,b)		do{}while(0)
#define ckrm_change_page_class(a,b)		do{}while(0)
#define ckrm_change_pages_class(a,b,c)		do{}while(0)
#define ckrm_mem_inc_active(a)			do{}while(0)
#define ckrm_mem_dec_active(a)			do{}while(0)
#define ckrm_mem_inc_inactive(a)		do{}while(0)
#define ckrm_mem_dec_inactive(a)		do{}while(0)
#define ckrm_shrink_list_empty()		(1)
#define ckrm_kick_page(a,b)			(0)
#define ckrm_class_limit_ok(a)			(1)
254 #endif // CONFIG_CKRM_RES_MEM
256 #endif // _LINUX_CKRM_MEM_INLINE_H_