+/*
+ * Try to reserve one inode against the superblock's inode limit.
+ *
+ * A negative ->free_inodes means no limit was configured, so the
+ * unlocked fast-path read is safe: the value never changes in that
+ * case.  When a limit exists, re-check under ->stat_lock before
+ * consuming a slot.
+ *
+ * Returns 1 if an inode slot was reserved (or no limit applies),
+ * 0 if the filesystem is out of inodes.
+ */
+static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
+{
+	int reserved = 1;
+
+	if (sbinfo->free_inodes < 0)	/* unlimited: nothing to account */
+		return 1;
+
+	spin_lock(&sbinfo->stat_lock);
+	if (sbinfo->free_inodes > 0)
+		sbinfo->free_inodes--;
+	else
+		reserved = 0;		/* limit hit */
+	spin_unlock(&sbinfo->stat_lock);
+
+	return reserved;
+}
+
+/*
+ * Return one inode slot to the superblock's pool.  A negative
+ * ->free_inodes means no limit is configured, in which case there is
+ * nothing to account and the value is never modified.
+ */
+static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
+{
+	if (sbinfo->free_inodes < 0)
+		return;
+
+	spin_lock(&sbinfo->stat_lock);
+	sbinfo->free_inodes++;
+	spin_unlock(&sbinfo->stat_lock);
+}
+
+
+/* slab cache for struct hugetlbfs_inode_info objects */
+static struct kmem_cache *hugetlbfs_inode_cachep;
+
+/*
+ * Allocate a hugetlbfs inode for @sb.
+ *
+ * A slot is reserved against the filesystem's inode limit first; if
+ * the slab allocation then fails, the reservation is rolled back so
+ * accounting stays balanced.
+ *
+ * Returns the embedded VFS inode, or NULL on failure (limit reached
+ * or out of memory).
+ */
+static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
+{
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
+	struct hugetlbfs_inode_info *info;
+
+	/* Reserve an inode slot before committing any memory. */
+	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
+		return NULL;
+
+	info = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
+	if (likely(info))
+		return &info->vfs_inode;
+
+	/* Allocation failed: hand the reserved slot back. */
+	hugetlbfs_inc_free_inodes(sbinfo);
+	return NULL;
+}
+
+/*
+ * Tear down a hugetlbfs inode: return its slot to the superblock's
+ * inode pool, release the shared NUMA policy, then free the object.
+ * The slab free must come last — @info is dead after it.
+ */
+static void hugetlbfs_destroy_inode(struct inode *inode)
+{
+	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+
+	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
+	mpol_free_shared_policy(&info->policy);
+	kmem_cache_free(hugetlbfs_inode_cachep, info);
+}
+
+static const struct address_space_operations hugetlbfs_aops = {