Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
[linux-2.6.git] include/linux/hugetlb.h
index 6af1ae4..4c5e610 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -4,6 +4,7 @@
 #ifdef CONFIG_HUGETLB_PAGE
 
 #include <linux/mempolicy.h>
+#include <asm/tlbflush.h>
 
 struct ctl_table;
 
@@ -15,39 +16,75 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
-void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
 int hugetlb_report_node_meminfo(int, char *);
-int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                       unsigned long address, int write_access);
+
+extern unsigned long max_huge_pages;
+extern const unsigned long hugetlb_zero, hugetlb_infinity;
+extern int sysctl_hugetlb_shm_group;
+
+/* arch callbacks */
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int write);
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
-struct page *alloc_huge_page(void);
-void free_huge_page(struct page *);
-
-extern unsigned long max_huge_pages;
-extern const unsigned long hugetlb_zero, hugetlb_infinity;
-extern int sysctl_hugetlb_shm_group;
+void hugetlb_change_protection(struct vm_area_struct *vma,
+               unsigned long address, unsigned long end, pgprot_t newprot);
 
 #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define is_hugepage_only_range(mm, addr, len)  0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-                                               do { } while (0)
+#endif
+
+#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
+#define hugetlb_free_pgd_range free_pgd_range
+#else
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+                           unsigned long end, unsigned long floor,
+                           unsigned long ceiling);
 #endif
 
 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define prepare_hugepage_range(addr, len)      \
-       is_aligned_hugepage_range(addr, len)
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
 #else
 int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
 
+#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
+#define set_huge_pte_at(mm, addr, ptep, pte)   set_pte_at(mm, addr, ptep, pte)
+#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
+#else
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep);
+#endif
+
+#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
+#define hugetlb_prefault_arch_hook(mm)         do { } while (0)
+#else
+void hugetlb_prefault_arch_hook(struct mm_struct *mm);
+#endif
+
 #else /* !CONFIG_HUGETLB_PAGE */
 
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -63,24 +100,21 @@ static inline unsigned long hugetlb_total_pages(void)
 #define follow_huge_addr(mm, addr, write)      ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)         ({ BUG(); 0; })
-#define zap_hugepage_range(vma, start, len)    BUG()
 #define unmap_hugepage_range(vma, start, end)  BUG()
-#define is_hugepage_mem_enough(size)           0
 #define hugetlb_report_meminfo(buf)            0
 #define hugetlb_report_node_meminfo(n, buf)    0
 #define follow_huge_pmd(mm, addr, pmd, write)  NULL
-#define is_aligned_hugepage_range(addr, len)   0
 #define prepare_hugepage_range(addr, len)      (-EINVAL)
 #define pmd_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-                                               do { } while (0)
-#define alloc_huge_page()                      ({ NULL; })
-#define free_huge_page(p)                      ({ (void)(p); BUG(); })
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
+#define hugetlb_fault(mm, vma, addr, write)    ({ BUG(); 0; })
+
+#define hugetlb_change_protection(vma, address, end, newprot)
 
 #ifndef HPAGE_MASK
-#define HPAGE_MASK     0               /* Keep the compiler happy */
-#define HPAGE_SIZE     0
+#define HPAGE_MASK     PAGE_MASK               /* Keep the compiler happy */
+#define HPAGE_SIZE     PAGE_SIZE
 #endif
 
 #endif /* !CONFIG_HUGETLB_PAGE */
@@ -105,6 +139,8 @@ struct hugetlbfs_sb_info {
 
 struct hugetlbfs_inode_info {
        struct shared_policy policy;
+       /* Protected by the (global) hugetlb_lock */
+       unsigned long prereserved_hpages;
        struct inode vfs_inode;
 };
 
@@ -118,9 +154,13 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-extern struct file_operations hugetlbfs_file_operations;
+extern const struct file_operations hugetlbfs_file_operations;
 extern struct vm_operations_struct hugetlb_vm_ops;
 struct file *hugetlb_zero_setup(size_t);
+int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
+                              unsigned long atleast_hpages);
+void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
+                                 unsigned long atmost_hpages);
 int hugetlb_get_quota(struct address_space *mapping);
 void hugetlb_put_quota(struct address_space *mapping);
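
The hunks above repeatedly replace hard-coded hugetlb helpers with ARCH_HAS_* override points (ARCH_HAS_HUGETLB_FREE_PGD_RANGE, ARCH_HAS_PREPARE_HUGEPAGE_RANGE, ARCH_HAS_SETCLEAR_HUGE_PTE, ARCH_HAS_HUGETLB_PREFAULT_HOOK): the header supplies a generic fallback, usually a macro mapping onto the ordinary page-table helper, and an architecture that needs special handling defines the ARCH_HAS_* symbol and provides its own out-of-line function. Below is a minimal, self-contained userspace sketch of that override pattern, not kernel code; the names my_set_huge_pte and the printf bodies are purely illustrative.

/*
 * Sketch of the ARCH_HAS_* fallback pattern used by hugetlb.h above.
 * Compiles standalone; all identifiers here are hypothetical.
 */
#include <stdio.h>

/* An "architecture" opts out of the generic version by defining this
 * symbol before the header-style block below is seen, analogous to a
 * definition in its asm headers prior to including <linux/hugetlb.h>. */
/* #define ARCH_HAS_SETCLEAR_HUGE_PTE */

#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
/* Generic fallback: the hugepage helper is just the normal helper,
 * analogous to set_huge_pte_at() mapping onto set_pte_at(). */
#define my_set_huge_pte(val)	printf("generic path: %d\n", (val))
#else
/* Arch-specific replacement, analogous to an out-of-line
 * set_huge_pte_at() supplied by the architecture. */
static void my_set_huge_pte(int val)
{
	printf("arch-specific path: %d\n", val);
}
#endif

int main(void)
{
	my_set_huge_pte(42);
	return 0;
}

With the ARCH_HAS_* symbol undefined, callers get the macro fallback; defining it switches every caller to the architecture's own function with no change at the call sites, which is the property the patch relies on when it turns hugetlb_free_pgd_range, set_huge_pte_at, huge_ptep_get_and_clear and hugetlb_prefault_arch_hook into overridable hooks.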