X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Frmap.h;h=bdd277223af01e9b013b69b5f604bc877c987186;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=11b484e37ac94fcd2308a354d372f00faff8a736;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 11b484e37..bdd277223 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -4,7 +4,6 @@
  * Declarations for Reverse Mapping functions in mm/rmap.c
  */
 
-#include <linux/config.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
@@ -31,11 +30,11 @@ struct anon_vma {
 
 #ifdef CONFIG_MMU
 
-extern kmem_cache_t *anon_vma_cachep;
+extern struct kmem_cache *anon_vma_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
-	return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
+	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
 static inline void anon_vma_free(struct anon_vma *anon_vma)
@@ -71,8 +70,9 @@ void __anon_vma_link(struct vm_area_struct *);
  * rmap interfaces called when adding or removing pte of page
  */
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
-void page_remove_rmap(struct page *);
+void page_remove_rmap(struct page *, struct vm_area_struct *);
 
 /**
  * page_dup_rmap - duplicate pte mapping to a page
@@ -89,22 +89,42 @@ static inline void page_dup_rmap(struct page *page)
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked, int ignore_token);
-int try_to_unmap(struct page *);
+int page_referenced(struct page *, int is_locked);
+int try_to_unmap(struct page *, int ignore_refs);
+
+/*
+ * Called from mm/filemap_xip.c to unmap empty zero page
+ */
+pte_t *page_check_address(struct page *, struct mm_struct *,
+				unsigned long, spinlock_t **);
 
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 
+/*
+ * Cleans the PTEs of shared mappings.
+ * (and since clean PTEs should also be readonly, write protects them too)
+ *
+ * returns the number of cleaned PTEs.
+ */
+int page_mkclean(struct page *);
+
 #else	/* !CONFIG_MMU */
 
 #define anon_vma_init() do {} while (0)
 #define anon_vma_prepare(vma)	(0)
 #define anon_vma_link(vma)	do {} while (0)
 
-#define page_referenced(page,l,i) TestClearPageReferenced(page)
-#define try_to_unmap(page) SWAP_FAIL
+#define page_referenced(page,l) TestClearPageReferenced(page)
+#define try_to_unmap(page, refs) SWAP_FAIL
+
+static inline int page_mkclean(struct page *page)
+{
+	return 0;
+}
+
 
 #endif	/* CONFIG_MMU */
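
Not part of the patch: a minimal caller sketch against the post-patch prototypes above, showing the changed signatures (page_referenced() drops its ignore_token argument, try_to_unmap() gains an ignore_refs flag, and page_mkclean() is new). Kernel context is assumed; shrink_one_page() is an invented name used only for illustration, not an existing kernel function.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Illustrative only: a hypothetical reclaim-style caller. */
static int shrink_one_page(struct page *page)
{
	/* page_referenced() now takes only the page and an is_locked flag */
	if (page_referenced(page, 1))
		return SWAP_AGAIN;

	/*
	 * page_mkclean() write-protects and cleans the shared PTEs,
	 * returning the number of PTEs it touched.
	 */
	if (page_mapping(page) && page_mkclean(page))
		set_page_dirty(page);

	/* try_to_unmap() now carries an explicit ignore_refs argument */
	return try_to_unmap(page, 0);
}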