X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Finode.c;h=79a986f5a160d16f9721998490287ebdc36757c5;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=7e1d502c5a20a5b083d6aad218c51351e40f75a6;hpb=8fe849edaaabd915f060b3744165ff7f95a2b34e;p=linux-2.6.git

diff --git a/fs/inode.c b/fs/inode.c
index 7e1d502c5..79a986f5a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -20,12 +20,14 @@
 #include <linux/security.h>
 #include <linux/pagemap.h>
 #include <linux/cdev.h>
+#include <linux/bootmem.h>
+#include <linux/inotify.h>
+#include <linux/mount.h>
 
 /*
  * This is needed for the following functions:
  *  - inode_has_buffers
  *  - invalidate_inode_buffers
- *  - fsync_bdev
  *  - invalidate_bdev
  *
  * FIXME: remove all knowledge of the buffer layer from this file
@@ -54,8 +56,8 @@
 #define I_HASHBITS	i_hash_shift
 #define I_HASHMASK	i_hash_mask
 
-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;
 
 /*
  * Each inode can be on two separate lists. One is
@@ -71,7 +73,7 @@ static unsigned int i_hash_shift;
 LIST_HEAD(inode_in_use);
 LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
  * A simple spinlock to protect the list manipulations.
@@ -79,55 +81,32 @@ static struct hlist_head *inode_hashtable;
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
-spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
 * icache shrinking path, and the umount path. Without this exclusion,
 * by the time prune_icache calls iput for the inode whose pages it has
 * been invalidating, or by the time it calls clear_inode & destroy_inode
 * from its final dispose_list, the struct super_block they refer to
 * (for inode->i_sb->s_op) may already have been freed and reused.
 */
-static DECLARE_MUTEX(iprune_sem);
+static DEFINE_MUTEX(iprune_mutex);
 
 /*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

-static kmem_cache_t * inode_cachep;
-
-static void prune_icache(int nr_to_scan);
-
-
-#define INODE_UNUSED_THRESHOLD 15000
-#define PRUNE_BATCH_COUNT 32
-
-void try_to_clip_inodes(void)
-{
-	unsigned long count = 0;
-	/* if there are a LOT of unused inodes in cache, better shrink a few first */
-
-	/* check lockless first to not take the lock always here; racing occasionally isn't a big deal */
-	if (inodes_stat.nr_unused > INODE_UNUSED_THRESHOLD) {
-		spin_lock(&inode_lock);
-		if (inodes_stat.nr_unused > INODE_UNUSED_THRESHOLD)
-			count = inodes_stat.nr_unused - INODE_UNUSED_THRESHOLD;
-		spin_unlock(&inode_lock);
-		if (count)
-			prune_icache(count);
-	}
-}
-
+static kmem_cache_t * inode_cachep __read_mostly;
 
 static struct inode *alloc_inode(struct super_block *sb)
 {
 	static struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
-	static struct file_operations empty_fops;
+	static const struct file_operations empty_fops;
 	struct inode *inode;
-	
+
 	if (sb->s_op->alloc_inode)
 		inode = sb->s_op->alloc_inode(sb);
 	else
@@ -137,15 +116,12 @@ static struct inode *alloc_inode(struct super_block *sb)
 		struct address_space * const mapping = &inode->i_data;
 
 		inode->i_sb = sb;
-		if (sb->s_flags & MS_TAGXID)
-			inode->i_xid = current->xid;
-		else
-			inode->i_xid = 0;	/* maybe xid -1 would be better? 
*/ - // inode->i_dqh = dqhget(sb->s_dqh); + + /* essential because of inode slab reuse */ + inode->i_xid = 0; inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; atomic_set(&inode->i_count, 1); - inode->i_sock = 0; inode->i_op = &empty_iops; inode->i_fop = &empty_fops; inode->i_nlink = 1; @@ -161,7 +137,6 @@ static struct inode *alloc_inode(struct super_block *sb) inode->i_bdev = NULL; inode->i_cdev = NULL; inode->i_rdev = 0; - // inode->i_xid = 0; /* maybe not too wise ... */ inode->i_security = NULL; inode->dirtied_when = 0; if (security_inode_alloc(inode)) { @@ -200,8 +175,7 @@ static struct inode *alloc_inode(struct super_block *sb) void destroy_inode(struct inode *inode) { - if (inode_has_buffers(inode)) - BUG(); + BUG_ON(inode_has_buffers(inode)); security_inode_free(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); @@ -221,18 +195,21 @@ void inode_init_once(struct inode *inode) INIT_HLIST_NODE(&inode->i_hash); INIT_LIST_HEAD(&inode->i_dentry); INIT_LIST_HEAD(&inode->i_devices); - sema_init(&inode->i_sem, 1); + mutex_init(&inode->i_mutex); init_rwsem(&inode->i_alloc_sem); INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); - spin_lock_init(&inode->i_data.tree_lock); + rwlock_init(&inode->i_data.tree_lock); spin_lock_init(&inode->i_data.i_mmap_lock); - atomic_set(&inode->i_data.truncate_count, 0); INIT_LIST_HEAD(&inode->i_data.private_list); spin_lock_init(&inode->i_data.private_lock); - INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap); + INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); spin_lock_init(&inode->i_lock); i_size_ordered_init(inode); +#ifdef CONFIG_INOTIFY + INIT_LIST_HEAD(&inode->inotify_watches); + mutex_init(&inode->inotify_mutex); +#endif } EXPORT_SYMBOL(inode_init_once); @@ -261,6 +238,8 @@ void __iget(struct inode * inode) inodes_stat.nr_unused--; } +EXPORT_SYMBOL_GPL(__iget); + /** * clear_inode - clear an inode * @inode: inode to clear @@ -271,14 +250,12 @@ void __iget(struct inode * inode) */ void clear_inode(struct inode *inode) { + might_sleep(); invalidate_inode_buffers(inode); - if (inode->i_data.nrpages) - BUG(); - if (!(inode->i_state & I_FREEING)) - BUG(); - if (inode->i_state & I_CLEAR) - BUG(); + BUG_ON(inode->i_data.nrpages); + BUG_ON(!(inode->i_state & I_FREEING)); + BUG_ON(inode->i_state & I_CLEAR); wait_on_inode(inode); DQUOT_DROP(inode); if (inode->i_sb && inode->i_sb->s_op->clear_inode) @@ -312,6 +289,13 @@ static void dispose_list(struct list_head *head) if (inode->i_data.nrpages) truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); + + spin_lock(&inode_lock); + hlist_del_init(&inode->i_hash); + list_del_init(&inode->i_sb_list); + spin_unlock(&inode_lock); + + wake_up_inode(inode); destroy_inode(inode); nr_disposed++; } @@ -323,7 +307,7 @@ static void dispose_list(struct list_head *head) /* * Invalidate all inodes for a device. */ -static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose) +static int invalidate_list(struct list_head *head, struct list_head *dispose) { struct list_head *next; int busy = 0, count = 0; @@ -333,15 +317,20 @@ static int invalidate_list(struct list_head *head, struct super_block * sb, stru struct list_head * tmp = next; struct inode * inode; + /* + * We can reschedule here without worrying about the list's + * consistency because the per-sb list of inodes must not + * change during umount anymore, and because iprune_mutex keeps + * shrink_icache_memory() away. 
+ */ + cond_resched_lock(&inode_lock); + next = next->next; if (tmp == head) break; - inode = list_entry(tmp, struct inode, i_list); - if (inode->i_sb != sb) - continue; + inode = list_entry(tmp, struct inode, i_sb_list); invalidate_inode_buffers(inode); if (!atomic_read(&inode->i_count)) { - hlist_del_init(&inode->i_hash); list_move(&inode->i_list, dispose); inode->i_state |= I_FREEING; count++; @@ -354,14 +343,6 @@ static int invalidate_list(struct list_head *head, struct super_block * sb, stru return busy; } -/* - * This is a two-stage process. First we collect all - * offending inodes onto the throw-away list, and in - * the second stage we actually dispose of them. This - * is because we don't want to sleep while messing - * with the global lists.. - */ - /** * invalidate_inodes - discard the inodes on a device * @sb: superblock @@ -375,36 +356,29 @@ int invalidate_inodes(struct super_block * sb) int busy; LIST_HEAD(throw_away); - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); - busy = invalidate_list(&inode_in_use, sb, &throw_away); - busy |= invalidate_list(&inode_unused, sb, &throw_away); - busy |= invalidate_list(&sb->s_dirty, sb, &throw_away); - busy |= invalidate_list(&sb->s_io, sb, &throw_away); + inotify_unmount_inodes(&sb->s_inodes); + busy = invalidate_list(&sb->s_inodes, &throw_away); spin_unlock(&inode_lock); dispose_list(&throw_away); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); return busy; } EXPORT_SYMBOL(invalidate_inodes); -int __invalidate_device(struct block_device *bdev, int do_sync) +int __invalidate_device(struct block_device *bdev) { - struct super_block *sb; - int res; - - if (do_sync) - fsync_bdev(bdev); + struct super_block *sb = get_super(bdev); + int res = 0; - res = 0; - sb = get_super(bdev); if (sb) { /* * no need to lock the super, get_super holds the - * read semaphore so the filesystem cannot go away + * read mutex so the filesystem cannot go away * under us (->put_super runs with the write lock * hold). */ @@ -415,7 +389,6 @@ int __invalidate_device(struct block_device *bdev, int do_sync) invalidate_bdev(bdev, 0); return res; } - EXPORT_SYMBOL(__invalidate_device); static int can_unuse(struct inode *inode) @@ -451,7 +424,7 @@ static void prune_icache(int nr_to_scan) int nr_scanned; unsigned long reap = 0; - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { struct inode *inode; @@ -479,7 +452,6 @@ static void prune_icache(int nr_to_scan) if (!can_unuse(inode)) continue; } - hlist_del_init(&inode->i_hash); list_move(&inode->i_list, &freeable); inode->i_state |= I_FREEING; nr_pruned++; @@ -488,7 +460,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode_lock); dispose_list(&freeable); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); if (current_is_kswapd()) mod_page_state(kswapd_inodesteal, reap); @@ -505,7 +477,7 @@ static void prune_icache(int nr_to_scan) * This function is passed the number of inodes to scan, and it returns the * total number of remaining possibly-reclaimable inodes. */ -static int shrink_icache_memory(int nr, unsigned int gfp_mask) +static int shrink_icache_memory(int nr, gfp_t gfp_mask) { if (nr) { /* @@ -513,10 +485,11 @@ static int shrink_icache_memory(int nr, unsigned int gfp_mask) * and we don't want to recurse into the FS that called us * in clear_inode() and friends.. 
*/ - if (gfp_mask & __GFP_FS) - prune_icache(nr); + if (!(gfp_mask & __GFP_FS)) + return -1; + prune_icache(nr); } - return inodes_stat.nr_unused; + return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; } static void __wait_on_freeing_inode(struct inode *inode); @@ -538,7 +511,7 @@ repeat: continue; if (!test(inode, data)) continue; - if (inode->i_state & (I_FREEING|I_CLEAR)) { + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) { __wait_on_freeing_inode(inode); goto repeat; } @@ -563,7 +536,7 @@ repeat: continue; if (inode->i_sb != sb) continue; - if (inode->i_state & (I_FREEING|I_CLEAR)) { + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) { __wait_on_freeing_inode(inode); goto repeat; } @@ -590,6 +563,7 @@ struct inode *new_inode(struct super_block *sb) spin_lock(&inode_lock); inodes_stat.nr_inodes++; list_add(&inode->i_list, &inode_in_use); + list_add(&inode->i_sb_list, &sb->s_inodes); inode->i_ino = ++last_ino; inode->i_state = 0; spin_unlock(&inode_lock); @@ -638,6 +612,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h inodes_stat.nr_inodes++; list_add(&inode->i_list, &inode_in_use); + list_add(&inode->i_sb_list, &sb->s_inodes); hlist_add_head(&inode->i_hash, head); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); @@ -686,6 +661,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he inode->i_ino = ino; inodes_stat.nr_inodes++; list_add(&inode->i_list, &inode_in_use); + list_add(&inode->i_sb_list, &sb->s_inodes); hlist_add_head(&inode->i_hash, head); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); @@ -762,7 +738,7 @@ EXPORT_SYMBOL(iunique); struct inode *igrab(struct inode *inode) { spin_lock(&inode_lock); - if (!(inode->i_state & I_FREEING)) + if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) __iget(inode); else /* @@ -783,6 +759,7 @@ EXPORT_SYMBOL(igrab); * @head: the head of the list to search * @test: callback used for comparisons between inodes * @data: opaque data pointer to pass to @test + * @wait: if true wait for the inode to be unlocked, if false do not * * ifind() searches for the inode specified by @data in the inode * cache. This is a generalized version of ifind_fast() for file systems where @@ -795,9 +772,9 @@ EXPORT_SYMBOL(igrab); * * Note, @test is called with the inode_lock held, so can't sleep. */ -static inline struct inode *ifind(struct super_block *sb, +static struct inode *ifind(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), - void *data) + void *data, const int wait) { struct inode *inode; @@ -806,7 +783,8 @@ static inline struct inode *ifind(struct super_block *sb, if (inode) { __iget(inode); spin_unlock(&inode_lock); - wait_on_inode(inode); + if (likely(wait)) + wait_on_inode(inode); return inode; } spin_unlock(&inode_lock); @@ -828,7 +806,7 @@ static inline struct inode *ifind(struct super_block *sb, * * Otherwise NULL is returned. 
*/ -static inline struct inode *ifind_fast(struct super_block *sb, +static struct inode *ifind_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino) { struct inode *inode; @@ -846,7 +824,7 @@ static inline struct inode *ifind_fast(struct super_block *sb, } /** - * ilookup5 - search for an inode in the inode cache + * ilookup5_nowait - search for an inode in the inode cache * @sb: super block of file system to search * @hashval: hash value (usually inode number) to search for * @test: callback used for comparisons between inodes @@ -858,7 +836,38 @@ static inline struct inode *ifind_fast(struct super_block *sb, * identification of an inode. * * If the inode is in the cache, the inode is returned with an incremented - * reference count. + * reference count. Note, the inode lock is not waited upon so you have to be + * very careful what you do with the returned inode. You probably should be + * using ilookup5() instead. + * + * Otherwise NULL is returned. + * + * Note, @test is called with the inode_lock held, so can't sleep. + */ +struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, + int (*test)(struct inode *, void *), void *data) +{ + struct hlist_head *head = inode_hashtable + hash(sb, hashval); + + return ifind(sb, head, test, data, 0); +} + +EXPORT_SYMBOL(ilookup5_nowait); + +/** + * ilookup5 - search for an inode in the inode cache + * @sb: super block of file system to search + * @hashval: hash value (usually inode number) to search for + * @test: callback used for comparisons between inodes + * @data: opaque data pointer to pass to @test + * + * ilookup5() uses ifind() to search for the inode specified by @hashval and + * @data in the inode cache. This is a generalized version of ilookup() for + * file systems where the inode number is not sufficient for unique + * identification of an inode. + * + * If the inode is in the cache, the inode lock is waited upon and the inode is + * returned with an incremented reference count. * * Otherwise NULL is returned. 
* @@ -869,7 +878,7 @@ struct inode *ilookup5(struct super_block *sb, unsigned long hashval, { struct hlist_head *head = inode_hashtable + hash(sb, hashval); - return ifind(sb, head, test, data); + return ifind(sb, head, test, data, 1); } EXPORT_SYMBOL(ilookup5); @@ -926,7 +935,7 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, struct hlist_head *head = inode_hashtable + hash(sb, hashval); struct inode *inode; - inode = ifind(sb, head, test, data); + inode = ifind(sb, head, test, data, 1); if (inode) return inode; /* @@ -1022,29 +1031,31 @@ void generic_delete_inode(struct inode *inode) struct super_operations *op = inode->i_sb->s_op; list_del_init(&inode->i_list); + list_del_init(&inode->i_sb_list); inode->i_state|=I_FREEING; inodes_stat.nr_inodes--; spin_unlock(&inode_lock); - if (inode->i_data.nrpages) - truncate_inode_pages(&inode->i_data, 0); - security_inode_delete(inode); if (op->delete_inode) { void (*delete)(struct inode *) = op->delete_inode; if (!is_bad_inode(inode)) DQUOT_INIT(inode); - /* s_op->delete_inode internally recalls clear_inode() */ + /* Filesystems implementing their own + * s_op->delete_inode are required to call + * truncate_inode_pages and clear_inode() + * internally */ delete(inode); - } else + } else { + truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); + } spin_lock(&inode_lock); hlist_del_init(&inode->i_hash); spin_unlock(&inode_lock); wake_up_inode(inode); - if (inode->i_state != I_CLEAR) - BUG(); + BUG_ON(inode->i_state != I_CLEAR); destroy_inode(inode); } @@ -1058,21 +1069,27 @@ static void generic_forget_inode(struct inode *inode) if (!(inode->i_state & (I_DIRTY|I_LOCK))) list_move(&inode->i_list, &inode_unused); inodes_stat.nr_unused++; - spin_unlock(&inode_lock); - if (!sb || (sb->s_flags & MS_ACTIVE)) + if (!sb || (sb->s_flags & MS_ACTIVE)) { + spin_unlock(&inode_lock); return; + } + inode->i_state |= I_WILL_FREE; + spin_unlock(&inode_lock); write_inode_now(inode, 1); spin_lock(&inode_lock); + inode->i_state &= ~I_WILL_FREE; inodes_stat.nr_unused--; hlist_del_init(&inode->i_hash); } list_del_init(&inode->i_list); - inode->i_state|=I_FREEING; + list_del_init(&inode->i_sb_list); + inode->i_state |= I_FREEING; inodes_stat.nr_inodes--; spin_unlock(&inode_lock); if (inode->i_data.nrpages) truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); + wake_up_inode(inode); destroy_inode(inode); } @@ -1081,7 +1098,7 @@ static void generic_forget_inode(struct inode *inode) * inode when the usage count drops to zero, and * i_nlink is zero. */ -static void generic_drop_inode(struct inode *inode) +void generic_drop_inode(struct inode *inode) { if (!inode->i_nlink) generic_delete_inode(inode); @@ -1089,6 +1106,8 @@ static void generic_drop_inode(struct inode *inode) generic_forget_inode(inode); } +EXPORT_SYMBOL_GPL(generic_drop_inode); + /* * Called when we're dropping the last reference * to an inode. @@ -1115,15 +1134,16 @@ static inline void iput_final(struct inode *inode) * @inode: inode to put * * Puts an inode, dropping its usage count. If the inode use count hits - * zero the inode is also then freed and may be destroyed. + * zero, the inode is then freed and may also be destroyed. + * + * Consequently, iput() can sleep. 
*/ void iput(struct inode *inode) { if (inode) { struct super_operations *op = inode->i_sb->s_op; - if (inode->i_state == I_CLEAR) - BUG(); + BUG_ON(inode->i_state == I_CLEAR); if (op && op->put_inode) op->put_inode(inode); @@ -1156,61 +1176,60 @@ sector_t bmap(struct inode * inode, sector_t block) EXPORT_SYMBOL(bmap); -/* - * Return true if the filesystem which backs this inode considers the two - * passed timespecs to be sufficiently different to warrant flushing the - * altered time out to disk. - */ -static int inode_times_differ(struct inode *inode, - struct timespec *old, struct timespec *new) -{ - if (IS_ONE_SECOND(inode)) - return old->tv_sec != new->tv_sec; - return !timespec_equal(old, new); -} - /** - * update_atime - update the access time - * @inode: inode accessed + * touch_atime - update the access time + * @mnt: mount the inode is accessed on + * @dentry: dentry accessed * * Update the accessed time on an inode and mark it for writeback. * This function automatically handles read only file systems and media, * as well as the "noatime" flag and inode specific "noatime" markers. */ -void update_atime(struct inode *inode) +void touch_atime(struct vfsmount *mnt, struct dentry *dentry) { + struct inode *inode = dentry->d_inode; struct timespec now; - if (IS_NOATIME(inode)) + if (IS_RDONLY(inode)) return; - if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode)) + + if ((inode->i_flags & S_NOATIME) || + (inode->i_sb->s_flags & MS_NOATIME) || + ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))) return; - if (IS_RDONLY(inode)) + + /* + * We may have a NULL vfsmount when coming from NFSD + */ + if (mnt && + ((mnt->mnt_flags & MNT_NOATIME) || + ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))) return; - now = current_kernel_time(); - if (inode_times_differ(inode, &inode->i_atime, &now)) { + now = current_fs_time(inode->i_sb); + if (!timespec_equal(&inode->i_atime, &now)) { inode->i_atime = now; mark_inode_dirty_sync(inode); - } else { - if (!timespec_equal(&inode->i_atime, &now)) - inode->i_atime = now; } } -EXPORT_SYMBOL(update_atime); +EXPORT_SYMBOL(touch_atime); /** - * inode_update_time - update mtime and ctime time - * @inode: inode accessed - * @ctime_too: update ctime too + * file_update_time - update mtime and ctime time + * @file: file accessed * - * Update the mtime time on an inode and mark it for writeback. - * When ctime_too is specified update the ctime too. + * Update the mtime and ctime members of an inode and mark the inode + * for writeback. Note that this function is meant exclusively for + * usage in the file write path of filesystems, and filesystems may + * choose to explicitly ignore update via this function with the + * S_NOCTIME inode flag, e.g. for network filesystem where these + * timestamps are handled by the server. 
 */
-void inode_update_time(struct inode *inode, int ctime_too)
+void file_update_time(struct file *file)
 {
+	struct inode *inode = file->f_dentry->d_inode;
 	struct timespec now;
 	int sync_it = 0;
@@ -1219,22 +1238,20 @@
 	if (IS_RDONLY(inode))
 		return;
 
-	now = current_kernel_time();
-
-	if (inode_times_differ(inode, &inode->i_mtime, &now))
+	now = current_fs_time(inode->i_sb);
+	if (!timespec_equal(&inode->i_mtime, &now))
 		sync_it = 1;
 	inode->i_mtime = now;
 
-	if (ctime_too) {
-		if (inode_times_differ(inode, &inode->i_ctime, &now))
-			sync_it = 1;
-		inode->i_ctime = now;
-	}
+	if (!timespec_equal(&inode->i_ctime, &now))
+		sync_it = 1;
+	inode->i_ctime = now;
+
 	if (sync_it)
 		mark_inode_dirty_sync(inode);
 }
 
-EXPORT_SYMBOL(inode_update_time);
+EXPORT_SYMBOL(file_update_time);
 
 int inode_needs_sync(struct inode *inode)
 {
@@ -1255,110 +1272,65 @@ EXPORT_SYMBOL(inode_needs_sync);
 
 /* Function back in dquot.c */
 int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
 
-void remove_dquot_ref(struct super_block *sb, int type, struct list_head *tofree_head)
+void remove_dquot_ref(struct super_block *sb, int type,
+			struct list_head *tofree_head)
 {
 	struct inode *inode;
-	struct list_head *act_head;
 
 	if (!sb->dq_op)
 		return;	/* nothing to do */
 	spin_lock(&inode_lock);	/* This lock is for inodes code */
-	/* We don't have to lock against quota code - test IS_QUOTAINIT is just for speedup... */
-
-	list_for_each(act_head, &inode_in_use) {
-		inode = list_entry(act_head, struct inode, i_list);
-		if (inode->i_sb == sb && IS_QUOTAINIT(inode))
-			remove_inode_dquot_ref(inode, type, tofree_head);
-	}
-	list_for_each(act_head, &inode_unused) {
-		inode = list_entry(act_head, struct inode, i_list);
-		if (inode->i_sb == sb && IS_QUOTAINIT(inode))
-			remove_inode_dquot_ref(inode, type, tofree_head);
-	}
-	list_for_each(act_head, &sb->s_dirty) {
-		inode = list_entry(act_head, struct inode, i_list);
-		if (IS_QUOTAINIT(inode))
-			remove_inode_dquot_ref(inode, type, tofree_head);
-	}
-	list_for_each(act_head, &sb->s_io) {
-		inode = list_entry(act_head, struct inode, i_list);
-		if (IS_QUOTAINIT(inode))
+
+	/*
+	 * We don't have to lock against quota code - test IS_QUOTAINIT is
+	 * just for speedup...
+	 */
+	list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
+		if (!IS_NOQUOTA(inode))
 			remove_inode_dquot_ref(inode, type, tofree_head);
-	}
+
 	spin_unlock(&inode_lock);
 }
 
 #endif
 
-/*
- * Hashed waitqueues for wait_on_inode(). The table is pretty small - the
- * kernel doesn't lock many inodes at the same time.
- */
-#define I_WAIT_TABLE_ORDER 3
-static struct i_wait_queue_head {
-	wait_queue_head_t wqh;
-} ____cacheline_aligned_in_smp i_wait_queue_heads[1<<I_WAIT_TABLE_ORDER];
-
-/*
- * Return the address of the waitqueue that inode @inode should be waited on.
- */
-static wait_queue_head_t *i_waitq_head(struct inode *inode)
-{
-	return &i_wait_queue_heads[hash_ptr(inode, I_WAIT_TABLE_ORDER)].wqh;
-}
-
-void __wait_on_inode(struct inode *inode)
+int inode_wait(void *word)
 {
-	DECLARE_WAITQUEUE(wait, current);
-	wait_queue_head_t *wq = i_waitq_head(inode);
-
-	add_wait_queue(wq, &wait);
-repeat:
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	if (inode->i_state & I_LOCK) {
-		schedule();
-		goto repeat;
-	}
-	remove_wait_queue(wq, &wait);
-	__set_current_state(TASK_RUNNING);
+	schedule();
+	return 0;
 }
 
 /*
- * If we try to find an inode in the inode hash while it is being deleted, we
- * have to wait until the filesystem completes its deletion before reporting
- * that it isn't found. This is because iget will immediately call
- * ->read_inode, and we want to be sure that evidence of the deletion is found
- * by ->read_inode.
+ * If we try to find an inode in the inode hash while it is being
+ * deleted, we have to wait until the filesystem completes its
+ * deletion before reporting that it isn't found. This function waits
+ * until the deletion _might_ have completed. Callers are responsible
+ * to recheck inode state. 
* - * This call might return early if an inode which shares the waitq is woken up. - * This is most easily handled by the caller which will loop around again - * looking for the inode. + * It doesn't matter if I_LOCK is not set initially, a call to + * wake_up_inode() after removing from the hash list will DTRT. * * This is called with inode_lock held. */ static void __wait_on_freeing_inode(struct inode *inode) { - DECLARE_WAITQUEUE(wait, current); - wait_queue_head_t *wq = i_waitq_head(inode); - - add_wait_queue(wq, &wait); - set_current_state(TASK_UNINTERRUPTIBLE); + wait_queue_head_t *wq; + DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK); + wq = bit_waitqueue(&inode->i_state, __I_LOCK); + prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); spin_unlock(&inode_lock); schedule(); - remove_wait_queue(wq, &wait); + finish_wait(wq, &wait.wait); spin_lock(&inode_lock); } void wake_up_inode(struct inode *inode) { - wait_queue_head_t *wq = i_waitq_head(inode); - /* * Prevent speculative execution through spin_unlock(&inode_lock); */ smp_mb(); - if (waitqueue_active(wq)) - wake_up_all(wq); + wake_up_bit(&inode->i_state, __I_LOCK); } static __initdata unsigned long ihash_entries; @@ -1374,63 +1346,60 @@ __setup("ihash_entries=", set_ihash_entries); /* * Initialize the waitqueues and inode hash table. */ +void __init inode_init_early(void) +{ + int loop; + + /* If hashes are distributed across NUMA nodes, defer + * hash allocation until vmalloc space is available. + */ + if (hashdist) + return; + + inode_hashtable = + alloc_large_system_hash("Inode-cache", + sizeof(struct hlist_head), + ihash_entries, + 14, + HASH_EARLY, + &i_hash_shift, + &i_hash_mask, + 0); + + for (loop = 0; loop < (1 << i_hash_shift); loop++) + INIT_HLIST_HEAD(&inode_hashtable[loop]); +} + void __init inode_init(unsigned long mempages) { - struct hlist_head *head; - unsigned long order; - unsigned int nr_hash; - int i; - - for (i = 0; i < ARRAY_SIZE(i_wait_queue_heads); i++) - init_waitqueue_head(&i_wait_queue_heads[i].wqh); - - if (!ihash_entries) - ihash_entries = PAGE_SHIFT < 14 ? 
- mempages >> (14 - PAGE_SHIFT) : - mempages << (PAGE_SHIFT - 14); - - ihash_entries *= sizeof(struct hlist_head); - for (order = 0; ((1UL << order) << PAGE_SHIFT) < ihash_entries; order++) - ; - - if (order > 5) - order = 5; - - do { - unsigned long tmp; - - nr_hash = (1UL << order) * PAGE_SIZE / - sizeof(struct hlist_head); - i_hash_mask = (nr_hash - 1); - - tmp = nr_hash; - i_hash_shift = 0; - while ((tmp >>= 1UL) != 0UL) - i_hash_shift++; - - inode_hashtable = (struct hlist_head *) - __get_free_pages(GFP_ATOMIC, order); - } while (inode_hashtable == NULL && --order >= 0); - - printk("Inode-cache hash table entries: %d (order: %ld, %ld bytes)\n", - nr_hash, order, (PAGE_SIZE << order)); - - if (!inode_hashtable) - panic("Failed to allocate inode hash table\n"); - - head = inode_hashtable; - i = nr_hash; - do { - INIT_HLIST_HEAD(head); - head++; - i--; - } while (i); + int loop; /* inode slab cache */ - inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode), - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, init_once, - NULL); + inode_cachep = kmem_cache_create("inode_cache", + sizeof(struct inode), + 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| + SLAB_MEM_SPREAD), + init_once, + NULL); set_shrinker(DEFAULT_SEEKS, shrink_icache_memory); + + /* Hash may have been set up in inode_init_early */ + if (!hashdist) + return; + + inode_hashtable = + alloc_large_system_hash("Inode-cache", + sizeof(struct hlist_head), + ihash_entries, + 14, + 0, + &i_hash_shift, + &i_hash_mask, + 0); + + for (loop = 0; loop < (1 << i_hash_shift); loop++) + INIT_HLIST_HEAD(&inode_hashtable[loop]); } void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
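
The hunks touching dispose_list(), invalidate_list() and prune_icache() above all follow one two-stage locking pattern: while inode_lock is held, condemned inodes are only unlinked and moved onto a private list (throw_away / freeable) and flagged I_FREEING; the work that can sleep (truncate_inode_pages(), clear_inode(), destroy_inode()) runs afterwards from dispose_list() with the lock dropped. Below is a minimal, self-contained userspace C sketch of that shape, using a pthread mutex in place of the spinlock. It is an illustration only, not code from this tree; fake_inode, collect_victims() and dispose_victims() are invented names.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_inode {
	int ino;
	int in_use;			/* nonzero: still referenced, like i_count */
	struct fake_inode *next;	/* singly linked cache list */
};

static struct fake_inode *cache;	/* global cache, like inode_unused */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stage 1: under the lock, only unlink victims onto a private list. */
static struct fake_inode *collect_victims(void)
{
	struct fake_inode *victims = NULL, **p;

	pthread_mutex_lock(&cache_lock);
	p = &cache;
	while (*p) {
		struct fake_inode *in = *p;

		if (in->in_use) {	/* busy: keep it cached */
			p = &in->next;
			continue;
		}
		*p = in->next;		/* unlink from the shared cache */
		in->next = victims;	/* push onto the throw-away list */
		victims = in;
	}
	pthread_mutex_unlock(&cache_lock);
	return victims;
}

/* Stage 2: lock dropped, so slow teardown work is safe here. */
static void dispose_victims(struct fake_inode *victims)
{
	while (victims) {
		struct fake_inode *next = victims->next;

		printf("disposing inode %d\n", victims->ino);
		free(victims);		/* stands in for clear_inode()/destroy_inode() */
		victims = next;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct fake_inode *in = malloc(sizeof(*in));

		in->ino = i;
		in->in_use = (i == 2);	/* keep one inode "busy" */
		in->next = cache;
		cache = in;
	}
	dispose_victims(collect_victims());	/* frees inodes 0, 1 and 3 */
	return 0;
}

Build with something like "cc -std=c99 -pthread sketch.c". The design point carried over from the patch is that only pointer surgery happens inside the critical section; the expensive teardown never runs while the lock is held, which is exactly what lets the kernel version call functions that may sleep.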