vserver 2.0-pre4
diff --git a/fs/inode.c b/fs/inode.c
index f699b90..6c21383 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -20,7 +20,7 @@
 #include <linux/security.h>
 #include <linux/pagemap.h>
 #include <linux/cdev.h>
-#include <linux/vs_base.h>
+#include <linux/bootmem.h>
 
 /*
  * This is needed for the following functions:
@@ -80,7 +80,7 @@ static struct hlist_head *inode_hashtable;
  * NOTE! You also have to own the lock if you change
  * the i_state of an inode while it is in use..
  */
-spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(inode_lock);
 
 /*
  * iprune_sem provides exclusion between the kswapd or try_to_free_pages
@@ -90,7 +90,7 @@ spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
  * from its final dispose_list, the struct super_block they refer to
  * (for inode->i_sb->s_op) may already have been freed and reused.
  */
-static DECLARE_MUTEX(iprune_sem);
+DECLARE_MUTEX(iprune_sem);
 
 /*
  * Statistics gathering..
@@ -115,11 +115,10 @@ static struct inode *alloc_inode(struct super_block *sb)
                struct address_space * const mapping = &inode->i_data;
 
                inode->i_sb = sb;
-               if (sb->s_flags & MS_TAGXID)
-                       inode->i_xid = current->xid;
-               else
-                       inode->i_xid = 0;       /* maybe xid -1 would be better? */
                // inode->i_dqh = dqhget(sb->s_dqh);
+
+               /* essential because of inode slab reuse */
+               inode->i_xid = 0;
                inode->i_blkbits = sb->s_blocksize_bits;
                inode->i_flags = 0;
                atomic_set(&inode->i_count, 1);
@@ -139,7 +138,6 @@ static struct inode *alloc_inode(struct super_block *sb)
                inode->i_bdev = NULL;
                inode->i_cdev = NULL;
                inode->i_rdev = 0;
-               // inode->i_xid = 0;    /* maybe not too wise ... */
                inode->i_security = NULL;
                inode->dirtied_when = 0;
                if (security_inode_alloc(inode)) {
@@ -204,10 +202,9 @@ void inode_init_once(struct inode *inode)
        INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
        spin_lock_init(&inode->i_data.tree_lock);
        spin_lock_init(&inode->i_data.i_mmap_lock);
-       atomic_set(&inode->i_data.truncate_count, 0);
        INIT_LIST_HEAD(&inode->i_data.private_list);
        spin_lock_init(&inode->i_data.private_lock);
-       INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
+       INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
        INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
        spin_lock_init(&inode->i_lock);
        i_size_ordered_init(inode);
@@ -239,6 +236,8 @@ void __iget(struct inode * inode)
        inodes_stat.nr_unused--;
 }
 
+EXPORT_SYMBOL_GPL(__iget);
+
 /**
  * clear_inode - clear an inode
  * @inode: inode to clear
@@ -249,6 +248,7 @@ void __iget(struct inode * inode)
  */
 void clear_inode(struct inode *inode)
 {
+       might_sleep();
        invalidate_inode_buffers(inode);
        
        if (inode->i_data.nrpages)
@@ -301,7 +301,7 @@ static void dispose_list(struct list_head *head)
 /*
  * Invalidate all inodes for a device.
  */
-static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
+static int invalidate_list(struct list_head *head, struct list_head *dispose)
 {
        struct list_head *next;
        int busy = 0, count = 0;
@@ -311,15 +311,22 @@ static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
                struct list_head * tmp = next;
                struct inode * inode;
 
+               /*
+                * We can reschedule here without worrying about the list's
+                * consistency because the per-sb list of inodes must not
+                * change during umount anymore, and because iprune_sem keeps
+                * shrink_icache_memory() away.
+                */
+               cond_resched_lock(&inode_lock);
+
                next = next->next;
                if (tmp == head)
                        break;
-               inode = list_entry(tmp, struct inode, i_list);
-               if (inode->i_sb != sb)
-                       continue;
+               inode = list_entry(tmp, struct inode, i_sb_list);
                invalidate_inode_buffers(inode);
                if (!atomic_read(&inode->i_count)) {
                        hlist_del_init(&inode->i_hash);
+                       list_del(&inode->i_sb_list);
                        list_move(&inode->i_list, dispose);
                        inode->i_state |= I_FREEING;
                        count++;
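/*
 * Editorial sketch, not part of this patch: roughly what the
 * cond_resched_lock() call added above amounts to.  If a reschedule is
 * due it drops the given spinlock, schedules, and re-takes the lock --
 * which is exactly why the new comment stresses that the per-sb inode
 * list cannot change while inode_lock is temporarily released (umount
 * excludes new inodes, and iprune_sem keeps shrink_icache_memory() away).
 * Simplified; the real helper in kernel/sched.c also breaks out of the
 * lock on contention.  The name below is hypothetical.
 */
static inline int cond_resched_lock_sketch(spinlock_t *lock)
{
        if (need_resched()) {
                spin_unlock(lock);
                cond_resched();
                spin_lock(lock);
                return 1;
        }
        return 0;
}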
@@ -355,10 +362,7 @@ int invalidate_inodes(struct super_block * sb)
 
        down(&iprune_sem);
        spin_lock(&inode_lock);
-       busy = invalidate_list(&inode_in_use, sb, &throw_away);
-       busy |= invalidate_list(&inode_unused, sb, &throw_away);
-       busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
-       busy |= invalidate_list(&sb->s_io, sb, &throw_away);
+       busy = invalidate_list(&sb->s_inodes, &throw_away);
        spin_unlock(&inode_lock);
 
        dispose_list(&throw_away);
@@ -458,6 +462,7 @@ static void prune_icache(int nr_to_scan)
                                continue;
                }
                hlist_del_init(&inode->i_hash);
+               list_del_init(&inode->i_sb_list);
                list_move(&inode->i_list, &freeable);
                inode->i_state |= I_FREEING;
                nr_pruned++;
@@ -491,10 +496,11 @@ static int shrink_icache_memory(int nr, unsigned int gfp_mask)
                 * and we don't want to recurse into the FS that called us
                 * in clear_inode() and friends..
                 */
-               if (gfp_mask & __GFP_FS)
-                       prune_icache(nr);
+               if (!(gfp_mask & __GFP_FS))
+                       return -1;
+               prune_icache(nr);
        }
-       return inodes_stat.nr_unused;
+       return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 static void __wait_on_freeing_inode(struct inode *inode);
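/*
 * Editorial sketch, not part of this patch: the new return value scales
 * the count of unused inodes by /proc/sys/vm/vfs_cache_pressure, so the
 * shrinker core sees the inode cache as exactly as big as it is (100,
 * the default), bigger (>100), or smaller (<100).  The hypothetical
 * helper below just restates that arithmetic; dividing before
 * multiplying avoids overflow on huge caches, at the cost of reporting
 * 0 when fewer than 100 inodes are unused.
 */
static unsigned long icache_shrink_weight(unsigned long nr_unused,
                                          unsigned int cache_pressure)
{
        /* e.g. 5000 unused inodes at pressure 200 are reported as 10000 */
        return (nr_unused / 100) * cache_pressure;
}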
@@ -568,9 +574,9 @@ struct inode *new_inode(struct super_block *sb)
                spin_lock(&inode_lock);
                inodes_stat.nr_inodes++;
                list_add(&inode->i_list, &inode_in_use);
+               list_add(&inode->i_sb_list, &sb->s_inodes);
                inode->i_ino = ++last_ino;
                inode->i_state = 0;
-               inode->i_xid = vx_current_xid();
                spin_unlock(&inode_lock);
        }
        return inode;
@@ -617,6 +623,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
 
                        inodes_stat.nr_inodes++;
                        list_add(&inode->i_list, &inode_in_use);
+                       list_add(&inode->i_sb_list, &sb->s_inodes);
                        hlist_add_head(&inode->i_hash, head);
                        inode->i_state = I_LOCK|I_NEW;
                        spin_unlock(&inode_lock);
@@ -665,6 +672,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
                        inode->i_ino = ino;
                        inodes_stat.nr_inodes++;
                        list_add(&inode->i_list, &inode_in_use);
+                       list_add(&inode->i_sb_list, &sb->s_inodes);
                        hlist_add_head(&inode->i_hash, head);
                        inode->i_state = I_LOCK|I_NEW;
                        spin_unlock(&inode_lock);
@@ -1001,6 +1009,7 @@ void generic_delete_inode(struct inode *inode)
        struct super_operations *op = inode->i_sb->s_op;
 
        list_del_init(&inode->i_list);
+       list_del_init(&inode->i_sb_list);
        inode->i_state|=I_FREEING;
        inodes_stat.nr_inodes--;
        spin_unlock(&inode_lock);
@@ -1046,6 +1055,7 @@ static void generic_forget_inode(struct inode *inode)
                hlist_del_init(&inode->i_hash);
        }
        list_del_init(&inode->i_list);
+       list_del_init(&inode->i_sb_list);
        inode->i_state|=I_FREEING;
        inodes_stat.nr_inodes--;
        spin_unlock(&inode_lock);
@@ -1135,19 +1145,6 @@ sector_t bmap(struct inode * inode, sector_t block)
 
 EXPORT_SYMBOL(bmap);
 
-/*
- * Return true if the filesystem which backs this inode considers the two
- * passed timespecs to be sufficiently different to warrant flushing the
- * altered time out to disk.
- */
-static int inode_times_differ(struct inode *inode,
-                       struct timespec *old, struct timespec *new)
-{
-       if (IS_ONE_SECOND(inode))
-               return old->tv_sec != new->tv_sec;
-       return !timespec_equal(old, new);
-}
-
 /**
  *     update_atime    -       update the access time
  *     @inode: inode accessed
@@ -1167,8 +1164,8 @@ void update_atime(struct inode *inode)
        if (IS_RDONLY(inode))
                return;
 
-       now = current_kernel_time();
-       if (inode_times_differ(inode, &inode->i_atime, &now)) {
+       now = current_fs_time(inode->i_sb);
+       if (!timespec_equal(&inode->i_atime, &now)) {
                inode->i_atime = now;
                mark_inode_dirty_sync(inode);
        } else {
@@ -1198,14 +1195,13 @@ void inode_update_time(struct inode *inode, int ctime_too)
        if (IS_RDONLY(inode))
                return;
 
-       now = current_kernel_time();
-
-       if (inode_times_differ(inode, &inode->i_mtime, &now))
+       now = current_fs_time(inode->i_sb);
+       if (!timespec_equal(&inode->i_mtime, &now))
                sync_it = 1;
        inode->i_mtime = now;
 
        if (ctime_too) {
-               if (inode_times_differ(inode, &inode->i_ctime, &now))
+               if (!timespec_equal(&inode->i_ctime, &now))
                        sync_it = 1;
                inode->i_ctime = now;
        }
@@ -1234,72 +1230,32 @@ EXPORT_SYMBOL(inode_needs_sync);
 /* Function back in dquot.c */
 int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
 
-void remove_dquot_ref(struct super_block *sb, int type, struct list_head *tofree_head)
+void remove_dquot_ref(struct super_block *sb, int type,
+                       struct list_head *tofree_head)
 {
        struct inode *inode;
-       struct list_head *act_head;
 
        if (!sb->dq_op)
                return; /* nothing to do */
        spin_lock(&inode_lock); /* This lock is for inodes code */
-       /* We don't have to lock against quota code - test IS_QUOTAINIT is just for speedup... */
-       list_for_each(act_head, &inode_in_use) {
-               inode = list_entry(act_head, struct inode, i_list);
-               if (inode->i_sb == sb && IS_QUOTAINIT(inode))
-                       remove_inode_dquot_ref(inode, type, tofree_head);
-       }
-       list_for_each(act_head, &inode_unused) {
-               inode = list_entry(act_head, struct inode, i_list);
-               if (inode->i_sb == sb && IS_QUOTAINIT(inode))
-                       remove_inode_dquot_ref(inode, type, tofree_head);
-       }
-       list_for_each(act_head, &sb->s_dirty) {
-               inode = list_entry(act_head, struct inode, i_list);
-               if (IS_QUOTAINIT(inode))
-                       remove_inode_dquot_ref(inode, type, tofree_head);
-       }
-       list_for_each(act_head, &sb->s_io) {
-               inode = list_entry(act_head, struct inode, i_list);
-               if (IS_QUOTAINIT(inode))
+
+       /*
+        * We don't have to lock against quota code - test IS_QUOTAINIT is
+        * just for speedup...
+        */
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
+               if (!IS_NOQUOTA(inode))
                        remove_inode_dquot_ref(inode, type, tofree_head);
-       }
+
        spin_unlock(&inode_lock);
 }
 
 #endif
 
-/*
- * Hashed waitqueues for wait_on_inode().  The table is pretty small - the
- * kernel doesn't lock many inodes at the same time.
- */
-#define I_WAIT_TABLE_ORDER     3
-static struct i_wait_queue_head {
-       wait_queue_head_t wqh;
-} ____cacheline_aligned_in_smp i_wait_queue_heads[1<<I_WAIT_TABLE_ORDER];
-
-/*
- * Return the address of the waitqueue_head to be used for this inode
- */
-static wait_queue_head_t *i_waitq_head(struct inode *inode)
+int inode_wait(void *word)
 {
-       return &i_wait_queue_heads[hash_ptr(inode, I_WAIT_TABLE_ORDER)].wqh;
-}
-
-void __wait_on_inode(struct inode *inode)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       wait_queue_head_t *wq = i_waitq_head(inode);
-
-       add_wait_queue(wq, &wait);
-repeat:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       if (inode->i_state & I_LOCK) {
-               schedule();
-               goto repeat;
-       }
-       remove_wait_queue(wq, &wait);
-       __set_current_state(TASK_RUNNING);
+       schedule();
+       return 0;
 }
 
 /*
@@ -1308,36 +1264,39 @@ repeat:
  * that it isn't found.  This is because iget will immediately call
  * ->read_inode, and we want to be sure that evidence of the deletion is found
  * by ->read_inode.
- *
- * This call might return early if an inode which shares the waitq is woken up.
- * This is most easily handled by the caller which will loop around again
- * looking for the inode.
- *
  * This is called with inode_lock held.
  */
 static void __wait_on_freeing_inode(struct inode *inode)
 {
-       DECLARE_WAITQUEUE(wait, current);
-       wait_queue_head_t *wq = i_waitq_head(inode);
+       wait_queue_head_t *wq;
+       DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
 
-       add_wait_queue(wq, &wait);
-       set_current_state(TASK_UNINTERRUPTIBLE);
+       /*
+        * I_FREEING and I_CLEAR are cleared in process context under
+        * inode_lock, so we have to give the tasks who would clear them
+        * a chance to run and acquire inode_lock.
+        */
+       if (!(inode->i_state & I_LOCK)) {
+               spin_unlock(&inode_lock);
+               yield();
+               spin_lock(&inode_lock);
+               return;
+       }
+       wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+       prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode_lock);
        schedule();
-       remove_wait_queue(wq, &wait);
+       finish_wait(wq, &wait.wait);
        spin_lock(&inode_lock);
 }
 
 void wake_up_inode(struct inode *inode)
 {
-       wait_queue_head_t *wq = i_waitq_head(inode);
-
        /*
         * Prevent speculative execution through spin_unlock(&inode_lock);
         */
        smp_mb();
-       if (waitqueue_active(wq))
-               wake_up_all(wq);
+       wake_up_bit(&inode->i_state, __I_LOCK);
 }
 
 static __initdata unsigned long ihash_entries;
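/*
 * Editorial sketch, not part of this hunk: inode_wait() introduced above
 * is the "action" callback for the generic bit-waitqueue API that
 * replaces the old hashed waitqueue table.  A caller waiting for I_LOCK
 * to clear presumably looks like the sketch below (the real
 * wait_on_inode() lives in a header, outside this diff), and
 * wake_up_inode() above is the matching waker via wake_up_bit().
 * The name below is hypothetical.
 */
static inline void wait_on_inode_sketch(struct inode *inode)
{
        might_sleep();
        wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
                    TASK_UNINTERRUPTIBLE);
}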
@@ -1353,60 +1312,55 @@ __setup("ihash_entries=", set_ihash_entries);
 /*
  * Initialize the waitqueues and inode hash table.
  */
-void __init inode_init(unsigned long mempages)
+void __init inode_init_early(void)
 {
-       struct hlist_head *head;
-       unsigned long order;
-       unsigned int nr_hash;
-       int i;
+       int loop;
 
-       for (i = 0; i < ARRAY_SIZE(i_wait_queue_heads); i++)
-               init_waitqueue_head(&i_wait_queue_heads[i].wqh);
-
-       if (!ihash_entries)
-               ihash_entries = PAGE_SHIFT < 14 ?
-                               mempages >> (14 - PAGE_SHIFT) :
-                               mempages << (PAGE_SHIFT - 14);
-
-       ihash_entries *= sizeof(struct hlist_head);
-       for (order = 0; ((1UL << order) << PAGE_SHIFT) < ihash_entries; order++)
-               ;
-
-       do {
-               unsigned long tmp;
-
-               nr_hash = (1UL << order) * PAGE_SIZE /
-                       sizeof(struct hlist_head);
-               i_hash_mask = (nr_hash - 1);
-
-               tmp = nr_hash;
-               i_hash_shift = 0;
-               while ((tmp >>= 1UL) != 0UL)
-                       i_hash_shift++;
-
-               inode_hashtable = (struct hlist_head *)
-                       __get_free_pages(GFP_ATOMIC, order);
-       } while (inode_hashtable == NULL && --order >= 0);
-
-       printk("Inode-cache hash table entries: %d (order: %ld, %ld bytes)\n",
-                       nr_hash, order, (PAGE_SIZE << order));
+       /* If hashes are distributed across NUMA nodes, defer
+        * hash allocation until vmalloc space is available.
+        */
+       if (hashdist)
+               return;
 
-       if (!inode_hashtable)
-               panic("Failed to allocate inode hash table\n");
+       inode_hashtable =
+               alloc_large_system_hash("Inode-cache",
+                                       sizeof(struct hlist_head),
+                                       ihash_entries,
+                                       14,
+                                       HASH_EARLY,
+                                       &i_hash_shift,
+                                       &i_hash_mask,
+                                       0);
+
+       for (loop = 0; loop < (1 << i_hash_shift); loop++)
+               INIT_HLIST_HEAD(&inode_hashtable[loop]);
+}
 
-       head = inode_hashtable;
-       i = nr_hash;
-       do {
-               INIT_HLIST_HEAD(head);
-               head++;
-               i--;
-       } while (i);
+void __init inode_init(unsigned long mempages)
+{
+       int loop;
 
        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
-                               0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, init_once,
-                               NULL);
+                               0, SLAB_PANIC, init_once, NULL);
        set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+
+       /* Hash may have been set up in inode_init_early */
+       if (!hashdist)
+               return;
+
+       inode_hashtable =
+               alloc_large_system_hash("Inode-cache",
+                                       sizeof(struct hlist_head),
+                                       ihash_entries,
+                                       14,
+                                       0,
+                                       &i_hash_shift,
+                                       &i_hash_mask,
+                                       0);
+
+       for (loop = 0; loop < (1 << i_hash_shift); loop++)
+               INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }
 
 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
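/*
 * Editorial note, not part of this patch: both hash-table setups above
 * rely on the generic allocator in mm/page_alloc.c instead of the old
 * open-coded __get_free_pages() sizing loop.  Assuming the prototype of
 * this era, the arguments map as follows:
 *
 *   alloc_large_system_hash("Inode-cache",             name for the boot log
 *                           sizeof(struct hlist_head), size of one bucket
 *                           ihash_entries,             requested entries (0 = auto-size from memory)
 *                           14,                        scale: ~one bucket per 16KB of RAM with 4K pages
 *                           HASH_EARLY or 0,           HASH_EARLY = bootmem; 0 = late/vmalloc (hashdist)
 *                           &i_hash_shift,             returned log2 of the table size
 *                           &i_hash_mask,              returned (table size - 1)
 *                           0);                        no upper limit on entries
 */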