#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
-#include <linux/vs_base.h>
/*
* A simple spinlock to protect the list manipulations.
*
* NOTE! You also have to own the lock if you change
* the i_state of an inode while it is in use..
*/
-spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(inode_lock);
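/*
 * Aside: a minimal sketch of the two initializer styles this hunk
 * converts between (my_lock and my_data are hypothetical, not from the
 * patch). DEFINE_SPINLOCK() declares and statically initializes the
 * lock in one step, replacing the open-coded SPIN_LOCK_UNLOCKED
 * assignment.
 */
static DEFINE_SPINLOCK(my_lock);	/* new style */
/* spinlock_t my_lock = SPIN_LOCK_UNLOCKED;   old style */

static int my_data;

static void my_update(void)
{
	spin_lock(&my_lock);
	my_data++;		/* my_data is protected by my_lock */
	spin_unlock(&my_lock);
}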
/*
* iprune_sem provides exclusion between the kswapd or try_to_free_pages
* icache shrinking path, and the umount path.
*/
inode->i_sb = sb;
// inode->i_dqh = dqhget(sb->s_dqh);
- /* important because of inode slab reuse */
+ /* essential because of inode slab reuse */
inode->i_xid = 0;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
spin_lock_init(&inode->i_data.tree_lock);
spin_lock_init(&inode->i_data.i_mmap_lock);
- atomic_set(&inode->i_data.truncate_count, 0);
INIT_LIST_HEAD(&inode->i_data.private_list);
spin_lock_init(&inode->i_data.private_lock);
- INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
+ INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
spin_lock_init(&inode->i_lock);
i_size_ordered_init(inode);
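/*
 * Aside: the assignments above run on every allocation, while the slab
 * constructor runs only when a slab page is first populated; anything
 * object-lifetime-local (i_xid, i_flags, the address_space locks and
 * lists) must therefore be reset here each time an inode is reused.
 * A sketch of the constructor side, simplified from this file's
 * init_once():
 */
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct inode *inode = (struct inode *) foo;

	/* only on first construction, not on every reuse */
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(inode);
}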
/*
* Invalidate all inodes for a device.
*/
-static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
+static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
struct list_head *next;
int busy = 0, count = 0;

next = head->next;
for (;;) {
struct list_head * tmp = next;
struct inode * inode;
+ /*
+ * We can reschedule here without worrying about the list's
+ * consistency because the per-sb list of inodes must not
+ * change during umount anymore, and because iprune_sem keeps
+ * shrink_icache_memory() away.
+ */
+ cond_resched_lock(&inode_lock);
+
next = next->next;
if (tmp == head)
break;
- inode = list_entry(tmp, struct inode, i_list);
- if (inode->i_sb != sb)
- continue;
+ inode = list_entry(tmp, struct inode, i_sb_list);
invalidate_inode_buffers(inode);
if (!atomic_read(&inode->i_count)) {
hlist_del_init(&inode->i_hash);
+ list_del(&inode->i_sb_list);
list_move(&inode->i_list, dispose);
inode->i_state |= I_FREEING;
count++;
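/*
 * Aside: cond_resched_lock() is what makes the long walk above
 * preemption-friendly; when a reschedule is due it drops the lock,
 * yields, and retakes it. Simplified sketch (the real implementation
 * lives in kernel/sched.c):
 */
int cond_resched_lock(spinlock_t *lock)
{
	if (need_resched()) {
		spin_unlock(lock);
		cond_resched();		/* give the CPU away */
		spin_lock(lock);
		return 1;		/* lock was dropped */
	}
	return 0;			/* lock was held throughout */
}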
down(&iprune_sem);
spin_lock(&inode_lock);
- busy = invalidate_list(&inode_in_use, sb, &throw_away);
- busy |= invalidate_list(&inode_unused, sb, &throw_away);
- busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
- busy |= invalidate_list(&sb->s_io, sb, &throw_away);
+ busy = invalidate_list(&sb->s_inodes, &throw_away);
spin_unlock(&inode_lock);
dispose_list(&throw_away);
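/*
 * Aside: with one list to scan, the surrounding function shrinks to
 * roughly the following (sketch of the post-patch invalidate_inodes()):
 */
int invalidate_inodes(struct super_block *sb)
{
	int busy;
	LIST_HEAD(throw_away);

	down(&iprune_sem);
	spin_lock(&inode_lock);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);
	up(&iprune_sem);

	return busy;
}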
continue;
}
hlist_del_init(&inode->i_hash);
+ list_del_init(&inode->i_sb_list);
list_move(&inode->i_list, &freeable);
inode->i_state |= I_FREEING;
nr_pruned++;
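/*
 * Aside: the i_sb_list manipulations scattered through these hunks all
 * maintain one invariant: a live inode is on its superblock's s_inodes
 * list, under inode_lock, from creation until I_FREEING is set. The
 * fields involved, abbreviated from this era's declarations (sketch,
 * not complete definitions):
 */
struct inode {
	struct list_head	i_list;		/* in_use/unused/dirty LRUs */
	struct list_head	i_sb_list;	/* new: entry in sb->s_inodes */
	/* ... */
};

struct super_block {
	struct list_head	s_inodes;	/* new: all inodes of this sb */
	/* ... */
};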
spin_lock(&inode_lock);
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
+ list_add(&inode->i_sb_list, &sb->s_inodes);
inode->i_ino = ++last_ino;
inode->i_state = 0;
spin_unlock(&inode_lock);
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
+ list_add(&inode->i_sb_list, &sb->s_inodes);
hlist_add_head(&inode->i_hash, head);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
inode->i_ino = ino;
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
+ list_add(&inode->i_sb_list, &sb->s_inodes);
hlist_add_head(&inode->i_hash, head);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
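/*
 * Aside: the same insertion sequence now repeats in new_inode(),
 * get_new_inode() and get_new_inode_fast(). A hypothetical helper
 * capturing it (not part of the patch; caller holds inode_lock, and
 * head may be NULL for inodes that skip the hash):
 */
static void inode_add_to_lists(struct super_block *sb,
			       struct hlist_head *head, struct inode *inode)
{
	inodes_stat.nr_inodes++;
	list_add(&inode->i_list, &inode_in_use);
	list_add(&inode->i_sb_list, &sb->s_inodes);
	if (head)
		hlist_add_head(&inode->i_hash, head);
}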
struct super_operations *op = inode->i_sb->s_op;
list_del_init(&inode->i_list);
+ list_del_init(&inode->i_sb_list);
inode->i_state|=I_FREEING;
inodes_stat.nr_inodes--;
spin_unlock(&inode_lock);
hlist_del_init(&inode->i_hash);
}
list_del_init(&inode->i_list);
+ list_del_init(&inode->i_sb_list);
inode->i_state|=I_FREEING;
inodes_stat.nr_inodes--;
spin_unlock(&inode_lock);
EXPORT_SYMBOL(bmap);
-/*
- * Return true if the filesystem which backs this inode considers the two
- * passed timespecs to be sufficiently different to warrant flushing the
- * altered time out to disk.
- */
-static int inode_times_differ(struct inode *inode,
- struct timespec *old, struct timespec *new)
-{
- if (IS_ONE_SECOND(inode))
- return old->tv_sec != new->tv_sec;
- return !timespec_equal(old, new);
-}
-
/**
* update_atime - update the access time
* @inode: inode accessed
*
* Update the accessed time on an inode and mark it for writeback.
* This function automatically handles read only file systems and media,
* as well as the "noatime" flag and inode specific "noatime" markers.
*/
if (IS_RDONLY(inode))
return;
- now = current_kernel_time();
- if (inode_times_differ(inode, &inode->i_atime, &now)) {
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_atime, &now)) {
inode->i_atime = now;
mark_inode_dirty_sync(inode);
} else {
if (IS_RDONLY(inode))
return;
- now = current_kernel_time();
-
- if (inode_times_differ(inode, &inode->i_mtime, &now))
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_mtime, &now))
sync_it = 1;
inode->i_mtime = now;
if (ctime_too) {
- if (inode_times_differ(inode, &inode->i_ctime, &now))
+ if (!timespec_equal(&inode->i_ctime, &now))
sync_it = 1;
inode->i_ctime = now;
}
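/*
 * Aside: inode_times_differ() could be dropped because
 * current_fs_time() already truncates "now" to the granularity the
 * filesystem advertises, so plain timespec_equal() is a sufficient
 * test. Roughly (sketch, assuming the s_time_gran-based helper of this
 * kernel generation):
 */
struct timespec current_fs_time(struct super_block *sb)
{
	struct timespec now = current_kernel_time();

	return timespec_trunc(now, sb->s_time_gran);
}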
/* Function back in dquot.c */
int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
-void remove_dquot_ref(struct super_block *sb, int type, struct list_head *tofree_head)
+void remove_dquot_ref(struct super_block *sb, int type,
+ struct list_head *tofree_head)
{
struct inode *inode;
- struct list_head *act_head;
if (!sb->dq_op)
return; /* nothing to do */
spin_lock(&inode_lock); /* This lock is for inodes code */
- /* We hold dqptr_sem so we are safe against the quota code */
- list_for_each(act_head, &inode_in_use) {
- inode = list_entry(act_head, struct inode, i_list);
- if (inode->i_sb == sb && !IS_NOQUOTA(inode))
- remove_inode_dquot_ref(inode, type, tofree_head);
- }
- list_for_each(act_head, &inode_unused) {
- inode = list_entry(act_head, struct inode, i_list);
- if (inode->i_sb == sb && !IS_NOQUOTA(inode))
- remove_inode_dquot_ref(inode, type, tofree_head);
- }
- list_for_each(act_head, &sb->s_dirty) {
- inode = list_entry(act_head, struct inode, i_list);
- if (!IS_NOQUOTA(inode))
- remove_inode_dquot_ref(inode, type, tofree_head);
- }
- list_for_each(act_head, &sb->s_io) {
- inode = list_entry(act_head, struct inode, i_list);
+ /*
+ * We don't have to lock against quota code - testing IS_NOQUOTA is
+ * just for speedup...
+ */
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
if (!IS_NOQUOTA(inode))
remove_inode_dquot_ref(inode, type, tofree_head);
- }
+
spin_unlock(&inode_lock);
}
#endif
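/*
 * Aside: the predicate used in the walk above, as defined in linux/fs.h
 * of this era; S_NOQUOTA marks the quota files themselves, so the loop
 * never takes quota references on them:
 *
 *	#define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
 */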
-/*
- * Hashed waitqueues for wait_on_inode(). The table is pretty small - the
- * kernel doesn't lock many inodes at the same time.
- */
-#define I_WAIT_TABLE_ORDER 3
-static struct i_wait_queue_head {
- wait_queue_head_t wqh;
-} ____cacheline_aligned_in_smp i_wait_queue_heads[1<<I_WAIT_TABLE_ORDER];
-
-/*
- * Return the address of the waitqueue_head to be used for this inode
- */
-static wait_queue_head_t *i_waitq_head(struct inode *inode)
-{
- return &i_wait_queue_heads[hash_ptr(inode, I_WAIT_TABLE_ORDER)].wqh;
-}
-
-void __wait_on_inode(struct inode *inode)
+int inode_wait(void *word)
{
- DECLARE_WAITQUEUE(wait, current);
- wait_queue_head_t *wq = i_waitq_head(inode);
-
- add_wait_queue(wq, &wait);
-repeat:
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (inode->i_state & I_LOCK) {
- schedule();
- goto repeat;
- }
- remove_wait_queue(wq, &wait);
- __set_current_state(TASK_RUNNING);
+ schedule();
+ return 0;
}
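/*
 * Aside: inode_wait() is the "action" callback of the bit-wait API; the
 * (unshown) sleeping side plugs it in roughly like this, assuming this
 * era's wait_on_bit() signature:
 */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
		    TASK_UNINTERRUPTIBLE);
}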
/*
* If we try to find an inode in the inode hash while it is being deleted, we
* have to wait until the filesystem completes its deletion before reporting
* that it isn't found. This is because iget will immediately call
* ->read_inode, and we want to be sure that evidence of the deletion is found
* by ->read_inode.
- *
- * This call might return early if an inode which shares the waitq is woken up.
- * This is most easily handled by the caller which will loop around again
- * looking for the inode.
- *
* This is called with inode_lock held.
*/
static void __wait_on_freeing_inode(struct inode *inode)
{
- DECLARE_WAITQUEUE(wait, current);
- wait_queue_head_t *wq = i_waitq_head(inode);
+ wait_queue_head_t *wq;
+ DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
- add_wait_queue(wq, &wait);
- set_current_state(TASK_UNINTERRUPTIBLE);
+ /*
+ * I_FREEING and I_CLEAR are cleared in process context under
+ * inode_lock, so we have to give the tasks who would clear them
+ * a chance to run and acquire inode_lock.
+ */
+ if (!(inode->i_state & I_LOCK)) {
+ spin_unlock(&inode_lock);
+ yield();
+ spin_lock(&inode_lock);
+ return;
+ }
+ wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode_lock);
schedule();
- remove_wait_queue(wq, &wait);
+ finish_wait(wq, &wait.wait);
spin_lock(&inode_lock);
}
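/*
 * Aside: DEFINE_WAIT_BIT bundles the word/bit pair with a waitqueue
 * entry whose wake function only fires for that exact bit, which is
 * what lets all inodes share the small bit_waitqueue() hash instead of
 * a private table. Supporting types, roughly as in linux/wait.h of
 * this era:
 */
struct wait_bit_key {
	void	*flags;		/* word containing the bit */
	int	bit_nr;		/* which bit to wait on */
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};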
void wake_up_inode(struct inode *inode)
{
- wait_queue_head_t *wq = i_waitq_head(inode);
-
/*
* Prevent speculative execution through spin_unlock(&inode_lock);
*/
smp_mb();
- if (waitqueue_active(wq))
- wake_up_all(wq);
+ wake_up_bit(&inode->i_state, __I_LOCK);
}
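/*
 * Aside: the matching waker pattern (sketch based on this file's
 * unlock_new_inode()): clear the bit first, then call wake_up_inode(),
 * with the smp_mb() above ordering the store against the waitqueue
 * check inside wake_up_bit().
 */
void unlock_new_inode(struct inode *inode)
{
	inode->i_state &= ~(I_LOCK|I_NEW);
	wake_up_inode(inode);
}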
static __initdata unsigned long ihash_entries;
void __init inode_init_early(void)
{
int loop;
+ /* If hashes are distributed across NUMA nodes, defer
+ * hash allocation until vmalloc space is available.
+ */
+ if (hashdist)
+ return;
+
inode_hashtable =
alloc_large_system_hash("Inode-cache",
sizeof(struct hlist_head),
ihash_entries,
14,
- 0,
+ HASH_EARLY,
&i_hash_shift,
- &i_hash_mask);
+ &i_hash_mask,
+ 0);
for (loop = 0; loop < (1 << i_hash_shift); loop++)
INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void __init inode_init(unsigned long mempages)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i_wait_queue_heads); i++)
- init_waitqueue_head(&i_wait_queue_heads[i].wqh);
+ int loop;
/* inode slab cache */
inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
0, SLAB_PANIC, init_once, NULL);
set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+
+ /* Hash may have been set up in inode_init_early */
+ if (!hashdist)
+ return;
+
+ inode_hashtable =
+ alloc_large_system_hash("Inode-cache",
+ sizeof(struct hlist_head),
+ ihash_entries,
+ 14,
+ 0,
+ &i_hash_shift,
+ &i_hash_mask,
+ 0);
+
+ for (loop = 0; loop < (1 << i_hash_shift); loop++)
+ INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
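/*
 * Aside: the early/late split keys off hashdist. HASH_EARLY tables are
 * carved out of bootmem before the page allocator is up; hashdist
 * (NUMA-interleaved) tables need vmalloc(), which only works by
 * inode_init() time, so the allocation is simply repeated there without
 * HASH_EARLY. The flag itself (sketch, from mm/page_alloc.c of this
 * era):
 *
 *	int hashdist = HASHDIST_DEFAULT;
 *	__setup("hashdist=", set_hashdist);
 */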
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)