static ckrm_mem_res_t *ckrm_mem_root_class;
atomic_t ckrm_mem_real_count = ATOMIC_INIT(0);
EXPORT_SYMBOL(ckrm_mem_real_count);
-static void ckrm_mem_evaluate_all_pages(void);
/* Initialize rescls values
 * May be called on each rcfs unmount or as part of error recovery
	res->pg_guar = CKRM_SHARE_DONTCARE;
	res->pg_limit = CKRM_SHARE_DONTCARE;
-	res->pg_unused = 0;
+	/* pg_unused now starts as DONTCARE, consistent with pg_guar/pg_limit. */
+	res->pg_unused = CKRM_SHARE_DONTCARE;
}
static void *
+	/* NOTE(review): lines below belong to the class-free/reset path; the
+	 * declarations and enclosing context are elided from this hunk. */
	if (!res)
		return;
-	res->shares.my_guarantee = 0;
-	res->shares.my_limit = 0;
-	res->pg_guar = 0;
-	res->pg_limit = 0;
-	res->pg_unused = 0;
-
	parres = ckrm_get_res_class(res->parent, mem_rcbs.resid, ckrm_mem_res_t);
+
	// return child's limit/guarantee to parent node
	if (parres) {
		child_guarantee_changed(&parres->shares, res->shares.my_guarantee, 0);
		child_maxlimit_changed_local(parres);
	}
-	ckrm_mem_evaluate_all_pages();
-	res->core = NULL;
-
+	/* Zero the local shares only AFTER the parent has been given the old
+	 * my_guarantee above; the previous order cleared it first, so
+	 * child_guarantee_changed() always saw 0. */
+	res->shares.my_guarantee = 0;
+	res->shares.my_limit = 0;
	spin_lock(&ckrm_mem_lock);
	list_del(&res->mcls_list);
	spin_unlock(&ckrm_mem_lock);
	mem_class_put(res);
+
	return;
}
	}
}
-	spin_unlock(&mm->peertask_lock);
+	/* ckrm_mem_evaluate_mm() now runs with peertask_lock still held, so the
+	 * mm's peer-task list cannot change mid-evaluation; the old debug printk
+	 * is dropped rather than left commented out. */
	ckrm_mem_evaluate_mm(mm);
-	/*
-	printk("chg_cls: task <%s:%d> mm %p oldmm %s newmm %s o %s n %s\n",
-		task->comm, task->pid, mm, prev_mmcls ? prev_mmcls->core->name:
-		"NULL", mm->memclass ? mm->memclass->core->name : "NULL",
-		o ? o->core->name: "NULL", n ? n->core->name: "NULL");
-	*/
+	spin_unlock(&mm->peertask_lock);
	return;
}
	guar = (res->pg_guar > 0) ? res->pg_guar : 0;
	range = res->pg_limit - guar;
+	/* Threshold for flagging children as CLS_PARENT_OVER raised from 110%
+	 * to 120% of the guarantee-to-limit range. */
-	if ((tot_usage > (guar + ((110 * range) / 100))) &&
+	if ((tot_usage > (guar + ((120 * range) / 100))) &&
		(res->pg_lent > (guar + ((25 * range) / 100)))) {
		set_flags_of_children(res, CLS_PARENT_OVER);
	}
		res->reclaim_flags |= CLS_OVER_100;
	} else if (cls_usage > (guar + ((3 * range) / 4))) {
		res->reclaim_flags |= CLS_OVER_75;
-	} else if (cls_usage > (guar + (range / 2))) {
-		res->reclaim_flags |= CLS_OVER_50;
-	} else if (cls_usage > (guar + (range / 4))) {
-		res->reclaim_flags |= CLS_OVER_25;
+	/* CLS_OVER_50 / CLS_OVER_25 buckets removed: usage between the
+	 * guarantee and 75% of range now collapses into CLS_OVER_GUAR. */
	} else if (cls_usage > guar) {
		res->reclaim_flags |= CLS_OVER_GUAR;
	} else {
{
	int i, j, mask = 0;
-	if (*flags == 0) {
-		*extract = 0;
+	/* NOTE(review): unlike the old code, the early return no longer clears
+	 * *extract when *flags == 0 -- callers relying on that reset should be
+	 * checked. */
+	if (*extract == 0 || *flags == 0) {
		return;
	}
-
	if (*flags & CLS_SHRINK) {
		*extract = CLS_SHRINK;
		*flags = 0;
		return;
	}
+
	i = fls(*flags);
	for (j = i-1; j > 0; j--) {
	}
+/*
+ * ckrm_near_limit (renamed from ckrm_at_limit): when a class approaches its
+ * page limit, queue it on ckrm_shrink_list, mark it MEM_NEAR_LIMIT and wake
+ * kswapd on one zone.  The AT_LIMIT_SUPPORT compile-time disable is removed.
+ */
void
-ckrm_at_limit(ckrm_mem_res_t *cls)
+ckrm_near_limit(ckrm_mem_res_t *cls)
{
-#ifndef AT_LIMIT_SUPPORT
-#warning "ckrm_at_limit disabled due to problems with memory hog tests"
-#else
	struct zone *zone;
	unsigned long now = jiffies;
+	/* NOTE(review): the old pg_limit == CKRM_SHARE_DONTCARE guard is
+	 * dropped -- confirm unlimited classes can no longer reach this path. */
-	if (!cls || (cls->pg_limit == CKRM_SHARE_DONTCARE) ||
-			((cls->flags & MEM_AT_LIMIT) == MEM_AT_LIMIT)) {
+	if (!cls || ((cls->flags & MEM_NEAR_LIMIT) == MEM_NEAR_LIMIT)) {
		return;
	}
	if ((cls->last_shrink + (10 * HZ)) < now) { // 10 seconds since last ?
		spin_lock(&ckrm_mem_lock);
		list_add(&cls->shrink_list, &ckrm_shrink_list);
		spin_unlock(&ckrm_mem_lock);
-	cls->flags |= MEM_AT_LIMIT;
+	cls->flags |= MEM_NEAR_LIMIT;
	for_each_zone(zone) {
		wakeup_kswapd(zone);
		break; // only once is enough
	}
-#endif // AT_LIMIT_SUPPORT
}
-static int unmapped = 0, changed = 0, unchanged = 0, maxnull = 0,
-anovma = 0, fnovma = 0;
+/*
+ * Pick the best-share class among the mms mapping this anonymous page (via
+ * its anon_vma list) and move the page there.  Now returns 1 if the page's
+ * class was changed, 0 otherwise; the file-static debug counters are gone.
+ */
-static void
+static int
ckrm_mem_evaluate_page_anon(struct page* page)
{
	ckrm_mem_res_t* pgcls = page_class(page);
	struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
	struct vm_area_struct *vma;
	struct mm_struct* mm;
-	int v = 0;
	spin_lock(&anon_vma->lock);
	BUG_ON(list_empty(&anon_vma->head));
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		v++;
		mm = vma->vm_mm;
+		/* NOTE(review): maxshareclass declaration/update are in context
+		 * elided from this hunk. */
		if (!maxshareclass ||
			ckrm_mem_share_compare(maxshareclass, mm->memclass) < 0) {
		}
	}
	spin_unlock(&anon_vma->lock);
-	if (!v)
-		anovma++;
-	if (!maxshareclass)
-		maxnull++;
	if (maxshareclass && (pgcls != maxshareclass)) {
		ckrm_change_page_class(page, maxshareclass);
-		changed++;
-	} else
-		unchanged++;
-	return;
+		return 1;
+	}
+	return 0;
}
+/*
+ * File-backed analogue of ckrm_mem_evaluate_page_anon(): walk the mappers in
+ * the address_space prio tree and move the page to the best-share class.
+ * Returns 1 when the page's class changed, 0 otherwise -- including when
+ * there is no mapping or the i_mmap_lock trylock fails and the scan is
+ * skipped.
+ */
-static void
+static int
ckrm_mem_evaluate_page_file(struct page* page)
{
	ckrm_mem_res_t* pgcls = page_class(page);
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct prio_tree_iter iter;
	struct mm_struct* mm;
-	int v = 0;
	if (!mapping)
-		return;
+		return 0;
	if (!spin_trylock(&mapping->i_mmap_lock))
-		return;
+		return 0;
	while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
		&iter, pgoff, pgoff)) != NULL) {
-		v++;
		mm = vma->vm_mm;
		if (!maxshareclass || ckrm_mem_share_compare(maxshareclass,mm->memclass)<0)
			maxshareclass = mm->memclass;
	}
	spin_unlock(&mapping->i_mmap_lock);
-	if (!v)
-		fnovma++;
-	if (!maxshareclass)
-		maxnull++;
-
	if (maxshareclass && pgcls != maxshareclass) {
		ckrm_change_page_class(page, maxshareclass);
-		changed++;
-	} else
-		unchanged++;
-	return;
+		return 1;
+	}
+	return 0;
}
+/*
+ * Re-evaluate one page's class, dispatching on anon vs file-backed.
+ * Returns 1 if the page moved to a different class, 0 otherwise; unmapped
+ * pages are left alone (old "unmapped" debug counter removed).  The
+ * whole-LRU rescan helper ckrm_mem_evaluate_all_pages() and its debug
+ * printks are deleted below -- per-page evaluation replaces it.
+ */
-static void
+static int
ckrm_mem_evaluate_page(struct page* page)
{
+	int changed = 0;
+
	if (page->mapping) {
		if (PageAnon(page))
-			ckrm_mem_evaluate_page_anon(page);
+			changed = ckrm_mem_evaluate_page_anon(page);
		else
-			ckrm_mem_evaluate_page_file(page);
-	} else
-		unmapped++;
-	return;
-}
-
-static void
-ckrm_mem_evaluate_all_pages()
-{
-	struct page *page;
-	struct zone *zone;
-	int active = 0, inactive = 0, cleared = 0;
-	int act_cnt, inact_cnt, idx;
-	ckrm_mem_res_t *res;
-
-	spin_lock(&ckrm_mem_lock);
-	list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
-		res->tmp_cnt = 0;
+			changed = ckrm_mem_evaluate_page_file(page);
	}
-	spin_unlock(&ckrm_mem_lock);
-
-	for_each_zone(zone) {
-		spin_lock_irq(&zone->lru_lock);
-		list_for_each_entry(page, &zone->inactive_list, lru) {
-			ckrm_mem_evaluate_page(page);
-			active++;
-			page_class(page)->tmp_cnt++;
-			if (!test_bit(PG_ckrm_account, &page->flags))
-				cleared++;
-		}
-		list_for_each_entry(page, &zone->active_list, lru) {
-			ckrm_mem_evaluate_page(page);
-			inactive++;
-			page_class(page)->tmp_cnt++;
-			if (!test_bit(PG_ckrm_account, &page->flags))
-				cleared++;
-		}
-		spin_unlock_irq(&zone->lru_lock);
-	}
-	printk(KERN_DEBUG "all_pages: active %d inactive %d cleared %d\n",
-		active, inactive, cleared);
-	spin_lock(&ckrm_mem_lock);
-	list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
-		act_cnt = 0; inact_cnt = 0; idx = 0;
-		for_each_zone(zone) {
-			act_cnt += res->nr_active[idx];
-			inact_cnt += res->nr_inactive[idx];
-			idx++;
-		}
-		printk(KERN_DEBUG "all_pages: %s: tmp_cnt %d; act_cnt %d inact_cnt %d\n",
-			res->core->name, res->tmp_cnt, act_cnt, inact_cnt);
-	}
-	spin_unlock(&ckrm_mem_lock);
-
-	// check all mm's in the system to see which memclass they are attached
-	// to.
-	return;
+	return changed;
}
+/*
+ * Walk one pmd's worth of ptes and re-evaluate the class of each present
+ * page (best-share class wins) instead of forcing pages into mm->memclass.
+ */
-static /*inline*/ int
+static inline int
class_migrate_pmd(struct mm_struct* mm, struct vm_area_struct* vma,
	pmd_t* pmdir, unsigned long address, unsigned long end)
{
	pte_t *pte, *orig_pte;
	unsigned long pmd_end;
	if (pmd_none(*pmdir))
		return 0;
	BUG_ON(pmd_bad(*pmdir));
	orig_pte = pte = pte_offset_map(pmdir,address);
	pmd_end = (address+PMD_SIZE)&PMD_MASK;
	if (end>pmd_end)
		end = pmd_end;
	do {
		if (pte_present(*pte)) {
-			BUG_ON(mm->memclass == NULL);
-			ckrm_change_page_class(pte_page(*pte), mm->memclass);
-			// ckrm_mem_evaluate_page(pte_page(*pte));
+			ckrm_mem_evaluate_page(pte_page(*pte));
		}
		address += PAGE_SIZE;
		pte++;
	} while(address && (address<end));
+	/* pte_offset_map() may kmap the pte page; keep the matching
+	 * pte_unmap() -- dropping it (as the original hunk did) leaks the
+	 * atomic kmap on CONFIG_HIGHPTE. */
	pte_unmap(orig_pte);
	return 0;
}
+/* Walk the pgd range for a vma; pgd/pmd iteration body elided in this hunk. */
-static /*inline*/ int
+static inline int
class_migrate_pgd(struct mm_struct* mm, struct vm_area_struct* vma,
	pgd_t* pgdir, unsigned long address, unsigned long end)
{
	return 0;
}
-static /*inline*/ int
+static inline int
class_migrate_vma(struct mm_struct* mm, struct vm_area_struct* vma)
{
	pgd_t* pgdir;
	maxshareclass = cls;
	}
	}
+	/* Take a reference on the new class BEFORE dropping the old one, so the
+	 * mm never holds an unreferenced memclass pointer.
+	 * NOTE(review): the old `maxshareclass &&` guard is removed -- confirm
+	 * maxshareclass cannot be NULL here, else mem_class_get(NULL). */
-	if (maxshareclass && (mm->memclass != (void *)maxshareclass)) {
+	if (mm->memclass != (void *)maxshareclass) {
+		mem_class_get(maxshareclass);
		if (mm->memclass)
			mem_class_put(mm->memclass);
		mm->memclass = maxshareclass;
-		mem_class_get(maxshareclass);
		/* Go through all VMA to migrate pages */
		down_read(&mm->mmap_sem);
	return;
}
+/*
+ * Charge a newly-added page toward mm's class (or the current task's class
+ * when mm has none).  An unmapped page (mapcount == 0) is always moved; a
+ * mapped page moves only when the new class has the larger share per
+ * ckrm_mem_share_compare().
+ */
+void
+ckrm_mem_evaluate_page_byadd(struct page* page, struct mm_struct* mm)
+{
+	ckrm_mem_res_t *pgcls = page_class(page);
+	ckrm_mem_res_t *chgcls = mm->memclass ? mm->memclass : GET_MEM_CLASS(current);
+
+	if (!chgcls || pgcls == chgcls)
+		return;
+
+	if (!page->mapcount) {
+		ckrm_change_page_class(page, chgcls);
+		return;
+	}
+	if (ckrm_mem_share_compare(pgcls, chgcls) < 0) {
+		ckrm_change_page_class(page, chgcls);
+		return;
+	}
+	return;
+}
+
void
ckrm_init_mm_to_task(struct mm_struct * mm, struct task_struct *task)
{
+	/* NOTE(review): peertask_lock acquisition is in context elided above. */
	list_del_init(&task->mm_peers);
	}
	list_add_tail(&task->mm_peers, &mm->tasklist);
-	spin_unlock(&mm->peertask_lock);
+	/* Re-evaluate the mm's class while still holding peertask_lock, so the
+	 * peer list cannot change under ckrm_mem_evaluate_mm(). */
	if (mm->memclass != GET_MEM_CLASS(task))
		ckrm_mem_evaluate_mm(mm);
+	spin_unlock(&mm->peertask_lock);
	return;
}
-int
-ckrm_memclass_valid(ckrm_mem_res_t *cls)
-{
- ckrm_mem_res_t *tmp;
-
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(tmp, &ckrm_memclass_list, mcls_list) {
- if (tmp == cls) {
- spin_unlock(&ckrm_mem_lock);
- return 1;
- }
- }
- spin_unlock(&ckrm_mem_lock);
- return 0;
-}
-
MODULE_LICENSE("GPL");