/*
 * (from commit log) Revert to pre-E17 CKRM memory controller back-port code.
 * File: include/linux/ckrm_mem_inline.h
 */
1 /* include/linux/ckrm_mem_inline.h : memory control for CKRM
2  *
3  * Copyright (C) Jiantao Kong, IBM Corp. 2003
4  *           (C) Shailabh Nagar, IBM Corp. 2003
5  *           (C) Chandra Seetharaman, IBM Corp. 2004
6  *
7  *
8  * Memory control functions of the CKRM kernel API
9  *
10  * Latest version, more details at http://ckrm.sf.net
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  */
18
19 #ifndef _LINUX_CKRM_MEM_INLINE_H_
20 #define _LINUX_CKRM_MEM_INLINE_H_
21
22 #include <linux/rmap.h>
23 #include <linux/mmzone.h>
24 #include <linux/ckrm_mem.h>
25
26
27 #ifdef CONFIG_CKRM_RES_MEM
28
29 #define INACTIVE        0
30 #define ACTIVE          1
31
/*
 * Return the memory resource class bound to @tsk's taskclass, or NULL
 * if the task has no memory class.
 */
static inline struct ckrm_mem_res *
ckrm_get_mem_class(struct task_struct *tsk)
{
        return ckrm_get_res_class(tsk->taskclass, mem_rcbs.resid,
                struct ckrm_mem_res);
}
38
39 #define ckrm_shrink_list_empty()        list_empty(&ckrm_shrink_list)
40
/* Mark @cz as having shrink (reclaim) activity pending. */
static inline void
ckrm_set_shrink(struct ckrm_zone *cz)
{
        set_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
}
46
/*
 * Atomically mark @cz for shrinking; returns nonzero if it was already
 * marked (i.e. a shrink was already queued/in progress).
 */
static inline int
ckrm_test_set_shrink(struct ckrm_zone *cz)
{
        return test_and_set_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
}
52
/* Clear the pending-shrink mark on @cz. */
static inline void
ckrm_clear_shrink(struct ckrm_zone *cz)
{
        clear_bit(CLS_SHRINK_BIT, &cz->shrink_flag);
}
58
59 /*
60  * Currently, a shared page that is shared by multiple classes is charged
61  * to a class with max available guarantee. Simply replace this function
62  * for other policies.
63  */
64 static inline int
65 ckrm_mem_share_compare(struct ckrm_mem_res *a, struct ckrm_mem_res *b)
66 {
67         if (a == NULL)
68                 return -(b != NULL);
69         if (b == NULL)
70                 return 0;
71         if (a->pg_guar == b->pg_guar)
72                 return 0;
73         if (a->pg_guar == CKRM_SHARE_DONTCARE)
74                 return 1;
75         if (b->pg_guar == CKRM_SHARE_DONTCARE)
76                 return -1;
77         return (a->pg_unused - b->pg_unused);
78 }
79
/*
 * Account one page to @cls.  @borrow is set when the charge is being
 * propagated upward from a child class that exceeded its own share.
 *
 * If the class has no guarantee (CKRM_SHARE_DONTCARE) or its total has
 * exceeded its unused guarantee, the charge is pushed recursively to the
 * parent class as a "borrowed" page; otherwise it counts against the
 * global ckrm_mem_real_count.  When the class crosses ckrm_mem_shrink_at
 * percent of its limit (and isn't already flagged MEM_AT_LIMIT), kick
 * the shrink machinery via ckrm_at_limit().
 *
 * NOTE(review): pg_lent/pg_borrowed are plain increments while pg_total
 * is atomic -- presumably callers serialize these; verify.
 */
static inline void
incr_use_count(struct ckrm_mem_res *cls, int borrow)
{
        extern int ckrm_mem_shrink_at;
        if (unlikely(!cls))
                return;
        BUG_ON(!ckrm_memclass_valid(cls));
        atomic_inc(&cls->pg_total);

        if (borrow)
                cls->pg_lent++;
        if ((cls->pg_guar == CKRM_SHARE_DONTCARE) ||
                        (atomic_read(&cls->pg_total) > cls->pg_unused)) {
                struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
                                mem_rcbs.resid, struct ckrm_mem_res);
                if (parcls) {
                        incr_use_count(parcls, 1);
                        cls->pg_borrowed++;
                }
        } else {
                atomic_inc(&ckrm_mem_real_count);
        }
        if (unlikely((cls->pg_limit != CKRM_SHARE_DONTCARE) &&
                        (atomic_read(&cls->pg_total) >=
                        ((ckrm_mem_shrink_at * cls->pg_limit) / 100)) &&
                        ((cls->flags & MEM_AT_LIMIT) != MEM_AT_LIMIT))) {
                ckrm_at_limit(cls);
        }
        return;
}
110
/*
 * Undo one page charge against @cls; @borrowed mirrors incr_use_count's
 * @borrow flag.  If the class currently has pages borrowed from its
 * parent, the uncharge is propagated up the hierarchy instead of
 * decrementing the global ckrm_mem_real_count.
 *
 * NOTE(review): as with incr_use_count, pg_lent/pg_borrowed updates are
 * non-atomic -- presumably serialized by the caller; verify.
 */
static inline void
decr_use_count(struct ckrm_mem_res *cls, int borrowed)
{
        if (unlikely(!cls))
                return;
        BUG_ON(!ckrm_memclass_valid(cls));
        atomic_dec(&cls->pg_total);
        if (borrowed)
                cls->pg_lent--;
        if (cls->pg_borrowed > 0) {
                struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
                                mem_rcbs.resid, struct ckrm_mem_res);
                if (parcls) {
                        decr_use_count(parcls, 1);
                        cls->pg_borrowed--;
                        return;
                }
        }
        atomic_dec(&ckrm_mem_real_count);
}
131
132 static inline void
133 ckrm_set_page_class(struct page *page, struct ckrm_mem_res *cls)
134 {
135         if (unlikely(cls == NULL)) {
136                 cls = ckrm_mem_root_class;
137         }
138         if (likely(cls != NULL)) {
139                 struct ckrm_zone *czone = &cls->ckrm_zone[page_zonenum(page)];
140                 if (unlikely(page->ckrm_zone)) {
141                         kref_put(&cls->nr_users, memclass_release);
142                 }
143                 page->ckrm_zone = czone;
144                 kref_get(&cls->nr_users);
145         } else {
146                 page->ckrm_zone = NULL;
147         }
148 }
149
150 static inline void
151 ckrm_set_pages_class(struct page *pages, int numpages, struct ckrm_mem_res *cls)
152 {
153         int i;
154         for (i = 0; i < numpages; pages++, i++) {
155                 ckrm_set_page_class(pages, cls);
156         }
157 }
158
/*
 * Detach @page from its memory class: undo the page charge (only if the
 * page carries the CkrmAccount flag), drop the class reference taken at
 * classification time, and clear the back-pointer.  Safe to call on an
 * unclassified page.  Note the order: uncharge before kref_put, so the
 * class cannot be released while still being dereferenced.
 */
static inline void
ckrm_clear_page_class(struct page *page)
{
        if (likely(page->ckrm_zone != NULL)) {
                if (CkrmAccount(page)) {
                        decr_use_count(page->ckrm_zone->memcls, 0);
                        ClearCkrmAccount(page);
                }
                kref_put(&page->ckrm_zone->memcls->nr_users, memclass_release);
                page->ckrm_zone = NULL;
        }
}
171
172 static inline void
173 ckrm_change_page_class(struct page *page, struct ckrm_mem_res *newcls)
174 {
175         struct ckrm_zone *old_czone = page->ckrm_zone, *new_czone;
176         struct ckrm_mem_res *oldcls;
177
178         if (unlikely(!old_czone || !newcls)) {
179                 BUG_ON(CkrmAccount(page));
180                 return;
181         }
182         BUG_ON(!CkrmAccount(page));
183
184         oldcls = old_czone->memcls;
185         if (oldcls == NULL || (oldcls == newcls))
186                 return;
187
188         kref_put(&oldcls->nr_users, memclass_release);
189         decr_use_count(oldcls, 0);
190
191         page->ckrm_zone = new_czone = &newcls->ckrm_zone[page_zonenum(page)];
192
193         kref_get(&newcls->nr_users);
194         incr_use_count(newcls, 0);
195
196         list_del(&page->lru);
197         if (PageActive(page)) {
198                 old_czone->nr_active--;
199                 new_czone->nr_active++;
200                 list_add(&page->lru, &new_czone->active_list);
201         } else {
202                 old_czone->nr_inactive--;
203                 new_czone->nr_inactive++;
204                 list_add(&page->lru, &new_czone->inactive_list);
205         }
206 }
207
/*
 * Charge a page entering the active LRU to the current task's memory
 * class (falling back to the root class) and link it onto that class's
 * per-zone active list.  The page must be unclassified on entry.
 * NOTE(review): presumably called under the zone LRU lock, like the
 * generic LRU accounting -- verify at call sites.
 */
static inline void
ckrm_mem_inc_active(struct page *page)
{
        struct ckrm_mem_res *cls = ckrm_get_mem_class(current) ?: ckrm_mem_root_class;

        if (cls == NULL)
                return;
        BUG_ON(CkrmAccount(page));
        BUG_ON(page->ckrm_zone != NULL);

        ckrm_set_page_class(page, cls);
        incr_use_count(cls, 0);
        SetCkrmAccount(page);
        BUG_ON(page->ckrm_zone == NULL);
        page->ckrm_zone->nr_active++;
        list_add(&page->lru, &page->ckrm_zone->active_list);
}
225
/*
 * Remove @page from its class's active list, decrement the active
 * count, and uncharge/unclassify it.  No-op for unclassified pages.
 */
static inline void
ckrm_mem_dec_active(struct page *page)
{
        if (page->ckrm_zone == NULL)
                return;
        BUG_ON(page->ckrm_zone->memcls == NULL);
        BUG_ON(!CkrmAccount(page));

        list_del(&page->lru);
        page->ckrm_zone->nr_active--;
        ckrm_clear_page_class(page);
}
238
239
/*
 * Inactive-list counterpart of ckrm_mem_inc_active(): charge a page
 * entering the inactive LRU to the current task's memory class (or the
 * root class) and link it onto the class's per-zone inactive list.
 * The page must be unclassified on entry.
 */
static inline void
ckrm_mem_inc_inactive(struct page *page)
{
        struct ckrm_mem_res *cls = ckrm_get_mem_class(current) ?: ckrm_mem_root_class;

        if (cls == NULL)
                return;
        BUG_ON(CkrmAccount(page));
        BUG_ON(page->ckrm_zone != NULL);

        ckrm_set_page_class(page, cls);
        incr_use_count(cls, 0);
        SetCkrmAccount(page);
        BUG_ON(page->ckrm_zone == NULL);
        page->ckrm_zone->nr_inactive++;
        list_add(&page->lru, &page->ckrm_zone->inactive_list);
}
257
/*
 * Remove @page from its class's inactive list, decrement the inactive
 * count, and uncharge/unclassify it.  No-op for unclassified pages.
 */
static inline void
ckrm_mem_dec_inactive(struct page *page)
{
        if (page->ckrm_zone == NULL)
                return;
        BUG_ON(page->ckrm_zone->memcls == NULL);
        BUG_ON(!CkrmAccount(page));

        page->ckrm_zone->nr_inactive--;
        list_del(&page->lru);
        ckrm_clear_page_class(page);
}
270
/*
 * Return nonzero if @cls may be charged another page.  Always ok when
 * the memory controller is not registered or @cls is NULL.  Classes
 * with a DONTCARE limit defer the decision to the nearest ancestor
 * with a real limit (a parentless DONTCARE class is treated as over
 * limit).  Otherwise the class is ok while pg_total stays within
 * ckrm_mem_fail_over percent of its limit.  On failure we also nudge
 * the shrinker via ckrm_at_limit().
 */
static inline int
ckrm_class_limit_ok(struct ckrm_mem_res *cls)
{
        int ret;
        extern int ckrm_mem_fail_over;

        if ((mem_rcbs.resid == -1) || !cls) {
                return 1;
        }
        if (cls->pg_limit == CKRM_SHARE_DONTCARE) {
                struct ckrm_mem_res *parcls = ckrm_get_res_class(cls->parent,
                                        mem_rcbs.resid, struct ckrm_mem_res);
                ret = (parcls ? ckrm_class_limit_ok(parcls) : 0);
        } else {
                ret = (atomic_read(&cls->pg_total) <=
                        ((ckrm_mem_fail_over * cls->pg_limit) / 100));
        }

        if (ret == 0) {
                // if we are failing... just nudge the back end
                ckrm_at_limit(cls);
        }
        return ret;
}
295
296 // task/mm initializations/cleanup
297
/* Initialize the per-task node linking tasks that share an mm. */
static inline void
ckrm_task_mm_init(struct task_struct *tsk)
{
        INIT_LIST_HEAD(&tsk->mm_peers);
}
303
/*
 * Switch @tsk from @oldmm to @newmm: unlink it from the old mm's peer
 * task list (if any) and append it to the new one's, re-evaluating each
 * mm's memory class since its set of owning tasks changed.
 * NOTE(review): assumes oldmm != newmm; the two peertask_locks are
 * taken in sequence, never nested.
 */
static inline void
ckrm_task_change_mm(struct task_struct *tsk, struct mm_struct *oldmm, struct mm_struct *newmm)
{
        if (oldmm) {
                spin_lock(&oldmm->peertask_lock);
                list_del(&tsk->mm_peers);
                ckrm_mem_evaluate_mm(oldmm, NULL);
                spin_unlock(&oldmm->peertask_lock);
        }
        spin_lock(&newmm->peertask_lock);
        list_add_tail(&tsk->mm_peers, &newmm->tasklist);
        ckrm_mem_evaluate_mm(newmm, NULL);
        spin_unlock(&newmm->peertask_lock);
}
318
/*
 * Detach @tsk from @mm's peer task list and re-evaluate the mm's memory
 * class now that one fewer task owns it.  list_del_init leaves
 * tsk->mm_peers safely re-usable.
 */
static inline void
ckrm_task_clear_mm(struct task_struct *tsk, struct mm_struct *mm)
{
        spin_lock(&mm->peertask_lock);
        list_del_init(&tsk->mm_peers);
        ckrm_mem_evaluate_mm(mm, NULL);
        spin_unlock(&mm->peertask_lock);
}
327
328 static inline void
329 ckrm_mm_init(struct mm_struct *mm)
330 {
331         INIT_LIST_HEAD(&mm->tasklist);
332         mm->peertask_lock = SPIN_LOCK_UNLOCKED;
333 }
334
/*
 * Bind @mm to memory class @cls, taking a reference on the class.
 * NOTE(review): @cls must be non-NULL (kref_get would oops on NULL) and
 * any previous mm->memclass reference is overwritten without a put --
 * presumably callers clear it first via ckrm_mm_clearclass(); verify.
 */
static inline void
ckrm_mm_setclass(struct mm_struct *mm, struct ckrm_mem_res *cls)
{
        mm->memclass = cls;
        kref_get(&cls->nr_users);
}
341
342 static inline void
343 ckrm_mm_clearclass(struct mm_struct *mm)
344 {
345         if (mm->memclass) {
346                 kref_put(&mm->memclass->nr_users, memclass_release);
347                 mm->memclass = NULL;
348         }
349 }
350
/* Bump @czone's active-page count by @cnt (bulk LRU accounting). */
static inline void
ckrm_zone_inc_active(struct ckrm_zone *czone, int cnt)
{
        czone->nr_active += cnt;
}
356
/* Bump @czone's inactive-page count by @cnt (bulk LRU accounting). */
static inline void
ckrm_zone_inc_inactive(struct ckrm_zone *czone, int cnt)
{
        czone->nr_inactive += cnt;
}
362
/* Drop @czone's active-page count by @cnt (bulk LRU accounting). */
static inline void
ckrm_zone_dec_active(struct ckrm_zone *czone, int cnt)
{
        czone->nr_active -= cnt;
}
368
/* Drop @czone's inactive-page count by @cnt (bulk LRU accounting). */
static inline void
ckrm_zone_dec_inactive(struct ckrm_zone *czone, int cnt)
{
        czone->nr_inactive -= cnt;
}
374
375 #else // !CONFIG_CKRM_RES_MEM
376
/*
 * CONFIG_CKRM_RES_MEM disabled: all CKRM memory hooks compile away.
 * Statement-like hooks become do{}while(0) no-ops; predicates expand
 * to constants that keep caller logic working (shrink list always
 * empty, limits always ok, no pages to kick).
 */
#define ckrm_set_page_class(a,b)        do{}while(0)
#define ckrm_set_pages_class(a,b,c)     do{}while(0)
#define ckrm_clear_page_class(a)        do{}while(0)
#define ckrm_clear_pages_class(a,b)     do{}while(0)
#define ckrm_change_page_class(a,b)     do{}while(0)
#define ckrm_change_pages_class(a,b,c)  do{}while(0)
#define ckrm_mem_inc_active(a)          do{}while(0)
#define ckrm_mem_dec_active(a)          do{}while(0)
#define ckrm_mem_inc_inactive(a)        do{}while(0)
#define ckrm_mem_dec_inactive(a)        do{}while(0)
#define ckrm_shrink_list_empty()        (1)
#define ckrm_kick_page(a,b)             (0)
#define ckrm_class_limit_ok(a)          (1)
#define ckrm_task_mm_init(a)            do{}while(0)
#define ckrm_task_clear_mm(a, b)        do{}while(0)
#define ckrm_task_change_mm(a, b, c)    do{}while(0)
#define ckrm_mm_init(a)                 do{}while(0)
#define ckrm_mm_setclass(a, b)          do{}while(0)
#define ckrm_mm_clearclass(a)           do{}while(0)
#define ckrm_zone_inc_active(a, b)      do{}while(0)
#define ckrm_zone_inc_inactive(a, b)    do{}while(0)
#define ckrm_zone_dec_active(a, b)      do{}while(0)
#define ckrm_zone_dec_inactive(a, b)    do{}while(0)
400
401 #endif // CONFIG_CKRM_RES_MEM
402
403 #endif // _LINUX_CKRM_MEM_INLINE_H_