X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fppc64%2Fmm%2Fhugetlbpage.c;h=7e1a9bf286be96c9bff85b6b0a09a3db1b9cddd9;hb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;hp=9416e53dfc23989cc15882113ce898b2934b6d38;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 9416e53df..7e1a9bf28 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -377,6 +377,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	unsigned long addr;
 	hugepte_t *ptep;
 	struct page *page;
+	int cpu;
 	int local = 0;
 	cpumask_t tmp;
 
@@ -385,7 +386,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	BUG_ON((end % HPAGE_SIZE) != 0);
 
 	/* XXX are there races with checking cpu_vm_mask? - Anton */
-	tmp = cpumask_of_cpu(smp_processor_id());
+	cpu = get_cpu();
+	tmp = cpumask_of_cpu(cpu);
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
@@ -408,6 +410,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 
 		put_page(page);
 	}
+	put_cpu();
 
 	// mm->rss -= (end - start) >> PAGE_SHIFT;
 	vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
@@ -527,6 +530,108 @@ full_search:
 	return -ENOMEM;
 }
 
+/*
+ * This mmap-allocator allocates new areas top-down from below the
+ * stack's low limit (the base):
+ *
+ * Because we have an exclusive hugepage region which lies within the
+ * normal user address space, we have to take special measures to make
+ * non-huge mmap()s evade the hugepage reserved regions.
+ */
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma, *prev_vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long base = mm->mmap_base, addr = addr0;
+	int first_time = 1;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	/* dont allow allocations above current base */
+	if (mm->free_area_cache > base)
+		mm->free_area_cache = base;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+				(!vma || addr + len <= vma->vm_start)
+				&& !is_hugepage_only_range(addr,len))
+			return addr;
+	}
+
+try_again:
+	/* make sure it can fit in the remaining address space */
+	if (mm->free_area_cache < len)
+		goto fail;
+
+	/* either no address requested or cant fit in requested address hole */
+	addr = (mm->free_area_cache - len) & PAGE_MASK;
+	do {
+hugepage_recheck:
+		if (touches_hugepage_low_range(addr, len)) {
+			addr = (addr & ((~0) << SID_SHIFT)) - len;
+			goto hugepage_recheck;
+		} else if (touches_hugepage_high_range(addr, len)) {
+			addr = TASK_HPAGE_BASE - len;
+		}
+
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * i.e. return with success:
+		 */
+		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+			return addr;
+
+		/*
+		 * new region fits between prev_vma->vm_end and
+		 * vma->vm_start, use it:
+		 */
+		if (addr+len <= vma->vm_start &&
+				(!prev_vma || (addr >= prev_vma->vm_end)))
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		else
+			/* pull free_area_cache down to the first hole */
+			if (mm->free_area_cache == vma->vm_end)
+				mm->free_area_cache = vma->vm_start;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+	} while (len <= vma->vm_start);
+
+fail:
+	/*
+	 * if hint left us with no space for the requested
+	 * mapping then try again:
+	 */
+	if (first_time) {
+		mm->free_area_cache = base;
+		first_time = 0;
+		goto try_again;
+	}
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = base;
+
+	return addr;
+}
+
 static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
 {
 	unsigned long addr = 0;
@@ -751,7 +856,7 @@ static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
 
 	vsid = get_vsid(context.id, ea);
 	va = (vsid << 28) | (ea & 0x0fffffff);
-	vpn = va >> LARGE_PAGE_SHIFT;
+	vpn = va >> HPAGE_SHIFT;
 	hash = hpt_hash(vpn, 1);
 	if (hugepte_val(pte) & _HUGEPAGE_SECONDARY)
 		hash = ~hash;
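
Note on the get_cpu()/put_cpu() change in unmap_hugepage_range() above:
calling smp_processor_id() from preemptible context is racy, because the
task may migrate to another CPU between reading the id and comparing it
against cpu_vm_mask, leaving the "a local TLB flush is enough" decision
stale. get_cpu() returns the current processor id with preemption
disabled, and put_cpu() re-enables preemption only after the last flush
that depends on that decision. A minimal sketch of the pattern
(work_on() is a made-up placeholder, not a kernel function):

	int cpu;

	cpu = get_cpu();	/* disable preemption, fetch current CPU id */
	work_on(cpu);		/* no migration can occur in this window */
	put_cpu();		/* re-enable preemption */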
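
Note on arch_get_unmapped_area_topdown(): the function is only a search
policy, and it takes effect once the architecture installs it as the
mm's unmapped-area hook. A sketch of how the flexible-mmap-layout code
of this kernel generation wires it up, modeled on the i386 version of
the era (the use_legacy_layout() test and mmap_base() helper are
simplified assumptions here, not taken from this patch):

	void arch_pick_mmap_layout(struct mm_struct *mm)
	{
		if (use_legacy_layout()) {	/* assumed policy check */
			mm->mmap_base = TASK_UNMAPPED_BASE;
			mm->get_unmapped_area = arch_get_unmapped_area;
		} else {
			/* base sits just below the stack's rlimit */
			mm->mmap_base = mmap_base();
			mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		}
	}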
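
Note on the hugepage_recheck step: the expression
'(addr & ((~0) << SID_SHIFT)) - len' rounds the candidate address down
to the base of its 256MB segment (SID_SHIFT is 28 on ppc64) and then
backs off by len, so the whole window [addr, addr+len) lands below the
reserved hugepage segment; the goto re-tests because the lowered window
may now touch a different reserved range. A worked example with
illustrative values (not taken from the patch):

	/* len = 0x10000, candidate addr = 0x30005000, and assume the
	 * segment 0x30000000-0x3fffffff is reserved for hugepages */
	addr = (0x30005000UL & (~0UL << 28)) - 0x10000;
	/* addr = 0x30000000 - 0x10000 = 0x2fff0000: the new window
	 * [0x2fff0000, 0x30000000) ends just below the segment */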