X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fdcache.c;fp=fs%2Fdcache.c;h=11dc83092d4aa48c78650f6dee4b48b834d25d7e;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=cbf76e692013281de5852d569896ac1625bd96fb;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/fs/dcache.c b/fs/dcache.c
index cbf76e692..11dc83092 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -14,6 +14,7 @@
  * the dcache entry is deleted or garbage collected.
  */

+#include
 #include
 #include
 #include
@@ -33,16 +34,17 @@
 #include
 #include

+/* #define DCACHE_DEBUG 1 */

-int sysctl_vfs_cache_pressure __read_mostly = 100;
+int sysctl_vfs_cache_pressure = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

  __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
-static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
+static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

 EXPORT_SYMBOL(dcache_lock);

-static kmem_cache_t *dentry_cache __read_mostly;
+static kmem_cache_t *dentry_cache;

 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
@@ -57,9 +59,9 @@ static kmem_cache_t *dentry_cache __read_mostly;
 #define D_HASHBITS	d_hash_shift
 #define D_HASHMASK	d_hash_mask

-static unsigned int d_hash_mask __read_mostly;
-static unsigned int d_hash_shift __read_mostly;
-static struct hlist_head *dentry_hashtable __read_mostly;
+static unsigned int d_hash_mask;
+static unsigned int d_hash_shift;
+static struct hlist_head *dentry_hashtable;
 static LIST_HEAD(dentry_unused);

 /* Statistics gathering. */
@@ -84,10 +86,6 @@ static void d_free(struct dentry *dentry)
 {
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
-	if (dentry->d_extra_attributes) {
-		kfree(dentry->d_extra_attributes);
-		dentry->d_extra_attributes = NULL;
-	}
 	call_rcu(&dentry->d_u.d_rcu, d_callback);
 }

@@ -327,13 +325,10 @@ static struct dentry * __d_find_alias(struct inode *inode, int want_discon)

 struct dentry * d_find_alias(struct inode *inode)
 {
-	struct dentry *de = NULL;
-
-	if (!list_empty(&inode->i_dentry)) {
-		spin_lock(&dcache_lock);
-		de = __d_find_alias(inode, 0);
-		spin_unlock(&dcache_lock);
-	}
+	struct dentry *de;
+	spin_lock(&dcache_lock);
+	de = __d_find_alias(inode, 0);
+	spin_unlock(&dcache_lock);
 	return de;
 }

@@ -362,13 +357,12 @@ restart:
 }

 /*
- * Throw away a dentry - free the inode, dput the parent.  This requires that
- * the LRU list has already been removed.
- *
+ * Throw away a dentry - free the inode, dput the parent.
+ * This requires that the LRU list has already been
+ * removed.
  * Called with dcache_lock, drops it and then regains.
- * Called with dentry->d_lock held, drops it.
  */
-static void prune_one_dentry(struct dentry * dentry)
+static inline void prune_one_dentry(struct dentry * dentry)
 {
 	struct dentry * parent;

@@ -386,8 +380,6 @@ static void prune_one_dentry(struct dentry * dentry)
 /**
  * prune_dcache - shrink the dcache
  * @count: number of entries to try and free
- * @sb: if given, ignore dentries for other superblocks
- *      which are being unmounted.
  *
  * Shrink the dcache. This is done when we need
  * more memory, or simply when we need to unmount
@@ -398,29 +390,16 @@ static void prune_one_dentry(struct dentry * dentry)
  * all the dentries are in use.
  */
-static void prune_dcache(int count, struct super_block *sb)
+static void prune_dcache(int count)
 {
 	spin_lock(&dcache_lock);
 	for (; count ; count--) {
 		struct dentry *dentry;
 		struct list_head *tmp;
-		struct rw_semaphore *s_umount;

 		cond_resched_lock(&dcache_lock);

 		tmp = dentry_unused.prev;
-		if (sb) {
-			/* Try to find a dentry for this sb, but don't try
-			 * too hard, if they aren't near the tail they will
-			 * be moved down again soon
-			 */
-			int skip = count;
-			while (skip && tmp != &dentry_unused &&
-			    list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
-				skip--;
-				tmp = tmp->prev;
-			}
-		}
 		if (tmp == &dentry_unused)
 			break;
 		list_del_init(tmp);
@@ -446,45 +425,7 @@ static void prune_dcache(int count, struct super_block *sb)
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}
-		/*
-		 * If the dentry is not DCACHED_REFERENCED, it is time
-		 * to remove it from the dcache, provided the super block is
-		 * NULL (which means we are trying to reclaim memory)
-		 * or this dentry belongs to the same super block that
-		 * we want to shrink.
-		 */
-		/*
-		 * If this dentry is for "my" filesystem, then I can prune it
-		 * without taking the s_umount lock (I already hold it).
-		 */
-		if (sb && dentry->d_sb == sb) {
-			prune_one_dentry(dentry);
-			continue;
-		}
-		/*
-		 * ...otherwise we need to be sure this filesystem isn't being
-		 * unmounted, otherwise we could race with
-		 * generic_shutdown_super(), and end up holding a reference to
-		 * an inode while the filesystem is unmounted.
-		 * So we try to get s_umount, and make sure s_root isn't NULL.
-		 * (Take a local copy of s_umount to avoid a use-after-free of
-		 * `dentry').
-		 */
-		s_umount = &dentry->d_sb->s_umount;
-		if (down_read_trylock(s_umount)) {
-			if (dentry->d_sb->s_root != NULL) {
-				prune_one_dentry(dentry);
-				up_read(s_umount);
-				continue;
-			}
-			up_read(s_umount);
-		}
-		spin_unlock(&dentry->d_lock);
-		/* Cannot remove the first dentry, and it isn't appropriate
-		 * to move it to the head of the list, so give up, and try
-		 * later
-		 */
-		break;
+		prune_one_dentry(dentry);
 	}
 	spin_unlock(&dcache_lock);
 }
@@ -525,7 +466,8 @@ void shrink_dcache_sb(struct super_block * sb)
 		dentry = list_entry(tmp, struct dentry, d_lru);
 		if (dentry->d_sb != sb)
 			continue;
-		list_move(tmp, &dentry_unused);
+		list_del(tmp);
+		list_add(tmp, &dentry_unused);
 	}

 	/*
@@ -544,142 +486,11 @@ repeat:
 			continue;
 		}
 		prune_one_dentry(dentry);
-		cond_resched_lock(&dcache_lock);
 		goto repeat;
 	}
 	spin_unlock(&dcache_lock);
 }

-/*
- * destroy a single subtree of dentries for unmount
- * - see the comments on shrink_dcache_for_umount() for a description of the
- *   locking
- */
-static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
-{
-	struct dentry *parent;
-
-	BUG_ON(!IS_ROOT(dentry));
-
-	/* detach this root from the system */
-	spin_lock(&dcache_lock);
-	if (!list_empty(&dentry->d_lru)) {
-		dentry_stat.nr_unused--;
-		list_del_init(&dentry->d_lru);
-	}
-	__d_drop(dentry);
-	spin_unlock(&dcache_lock);
-
-	for (;;) {
-		/* descend to the first leaf in the current subtree */
-		while (!list_empty(&dentry->d_subdirs)) {
-			struct dentry *loop;
-
-			/* this is a branch with children - detach all of them
-			 * from the system in one go */
-			spin_lock(&dcache_lock);
-			list_for_each_entry(loop, &dentry->d_subdirs,
-					    d_u.d_child) {
-				if (!list_empty(&loop->d_lru)) {
-					dentry_stat.nr_unused--;
-					list_del_init(&loop->d_lru);
-				}
-
-				__d_drop(loop);
-				cond_resched_lock(&dcache_lock);
-			}
-			spin_unlock(&dcache_lock);
-
-			/* move to the first child */
-			dentry = list_entry(dentry->d_subdirs.next,
-					    struct dentry, d_u.d_child);
-		}
-
-		/* consume the dentries from this leaf up through its parents
-		 * until we find one with children or run out altogether */
-		do {
-			struct inode *inode;
-
-			if (atomic_read(&dentry->d_count) != 0) {
-				printk(KERN_ERR
-				       "BUG: Dentry %p{i=%lx,n=%s}"
-				       " still in use (%d)"
-				       " [unmount of %s %s]\n",
-				       dentry,
-				       dentry->d_inode ?
-				       dentry->d_inode->i_ino : 0UL,
-				       dentry->d_name.name,
-				       atomic_read(&dentry->d_count),
-				       dentry->d_sb->s_type->name,
-				       dentry->d_sb->s_id);
-				BUG();
-			}
-
-			parent = dentry->d_parent;
-			if (parent == dentry)
-				parent = NULL;
-			else
-				atomic_dec(&parent->d_count);
-
-			list_del(&dentry->d_u.d_child);
-			dentry_stat.nr_dentry--;	/* For d_free, below */
-
-			inode = dentry->d_inode;
-			if (inode) {
-				dentry->d_inode = NULL;
-				list_del_init(&dentry->d_alias);
-				if (dentry->d_op && dentry->d_op->d_iput)
-					dentry->d_op->d_iput(dentry, inode);
-				else
-					iput(inode);
-			}
-
-			d_free(dentry);
-
-			/* finished when we fall off the top of the tree,
-			 * otherwise we ascend to the parent and move to the
-			 * next sibling if there is one */
-			if (!parent)
-				return;
-
-			dentry = parent;
-
-		} while (list_empty(&dentry->d_subdirs));
-
-		dentry = list_entry(dentry->d_subdirs.next,
-				    struct dentry, d_u.d_child);
-	}
-}
-
-/*
- * destroy the dentries attached to a superblock on unmounting
- * - we don't need to use dentry->d_lock, and only need dcache_lock when
- *   removing the dentry from the system lists and hashes because:
- * - the superblock is detached from all mountings and open files, so the
- *   dentry trees will not be rearranged by the VFS
- * - s_umount is write-locked, so the memory pressure shrinker will ignore
- *   any dentries belonging to this superblock that it comes across
- * - the filesystem itself is no longer permitted to rearrange the dentries
- *   in this superblock
- */
-void shrink_dcache_for_umount(struct super_block *sb)
-{
-	struct dentry *dentry;
-
-	if (down_read_trylock(&sb->s_umount))
-		BUG();
-
-	dentry = sb->s_root;
-	sb->s_root = NULL;
-	atomic_dec(&dentry->d_count);
-	shrink_dcache_for_umount_subtree(dentry);
-
-	while (!hlist_empty(&sb->s_anon)) {
-		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
-		shrink_dcache_for_umount_subtree(dentry);
-	}
-}
-
 /*
  * Search for at least 1 mount point in the dentry's subdirs.
  * We descend to the next level whenever the d_subdirs
@@ -770,7 +581,7 @@ resume:
 		 * of the unused list for prune_dcache
 		 */
 		if (!atomic_read(&dentry->d_count)) {
-			list_add_tail(&dentry->d_lru, &dentry_unused);
+			list_add(&dentry->d_lru, dentry_unused.prev);
 			dentry_stat.nr_unused++;
 			found++;
 		}
@@ -788,6 +599,10 @@ resume:
 		 */
 		if (!list_empty(&dentry->d_subdirs)) {
 			this_parent = dentry;
+#ifdef DCACHE_DEBUG
+printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
+dentry->d_parent->d_name.name, dentry->d_name.name, found);
+#endif
 			goto repeat;
 		}
 	}
@@ -797,6 +612,10 @@ resume:
 	if (this_parent != parent) {
 		next = this_parent->d_u.d_child.next;
 		this_parent = this_parent->d_parent;
+#ifdef DCACHE_DEBUG
+printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
+this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
+#endif
 		goto resume;
 	}
 out:
@@ -816,7 +635,46 @@ void shrink_dcache_parent(struct dentry * parent)
 	int found;

 	while ((found = select_parent(parent)) != 0)
-		prune_dcache(found, parent->d_sb);
+		prune_dcache(found);
+}
+
+/**
+ * shrink_dcache_anon - further prune the cache
+ * @head: head of d_hash list of dentries to prune
+ *
+ * Prune the dentries that are anonymous
+ *
+ * parsing d_hash list does not hlist_for_each_entry_rcu() as it
+ * done under dcache_lock.
+ *
+ */
+void shrink_dcache_anon(struct hlist_head *head)
+{
+	struct hlist_node *lp;
+	int found;
+	do {
+		found = 0;
+		spin_lock(&dcache_lock);
+		hlist_for_each(lp, head) {
+			struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
+			if (!list_empty(&this->d_lru)) {
+				dentry_stat.nr_unused--;
+				list_del_init(&this->d_lru);
+			}
+
+			/*
+			 * move only zero ref count dentries to the end
+			 * of the unused list for prune_dcache
+			 */
+			if (!atomic_read(&this->d_count)) {
+				list_add_tail(&this->d_lru, &dentry_unused);
+				dentry_stat.nr_unused++;
+				found++;
+			}
+		}
+		spin_unlock(&dcache_lock);
+		prune_dcache(found);
+	} while(found);
 }

 /*
@@ -836,7 +694,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
-		prune_dcache(nr, NULL);
+		prune_dcache(nr);
 	}
 	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
@@ -884,7 +742,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 	dentry->d_sb = NULL;
 	dentry->d_op = NULL;
 	dentry->d_fsdata = NULL;
-	dentry->d_extra_attributes = NULL;
 	dentry->d_mounted = 0;
 #ifdef CONFIG_PROFILING
 	dentry->d_cookie = NULL;
@@ -937,12 +794,11 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)

 void d_instantiate(struct dentry *entry, struct inode * inode)
 {
-	BUG_ON(!list_empty(&entry->d_alias));
+	if (!list_empty(&entry->d_alias)) BUG();
 	spin_lock(&dcache_lock);
 	if (inode)
 		list_add(&entry->d_alias, &inode->i_dentry);
 	entry->d_inode = inode;
-	fsnotify_d_instantiate(entry, inode);
 	spin_unlock(&dcache_lock);
 	security_d_instantiate(entry, inode);
 }
@@ -963,19 +819,17 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
  * (or otherwise set) by the caller to indicate that it is now
  * in use by the dcache.
  */
-static struct dentry *__d_instantiate_unique(struct dentry *entry,
-					     struct inode *inode)
+struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
 {
 	struct dentry *alias;
 	int len = entry->d_name.len;
 	const char *name = entry->d_name.name;
 	unsigned int hash = entry->d_name.hash;

-	if (!inode) {
-		entry->d_inode = NULL;
-		return NULL;
-	}
-
+	BUG_ON(!list_empty(&entry->d_alias));
+	spin_lock(&dcache_lock);
+	if (!inode)
+		goto do_negative;
 	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
 		struct qstr *qstr = &alias->d_name;

@@ -988,35 +842,18 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
 		if (memcmp(qstr->name, name, len))
 			continue;
 		dget_locked(alias);
+		spin_unlock(&dcache_lock);
+		BUG_ON(!d_unhashed(alias));
+		iput(inode);
 		return alias;
 	}
-	list_add(&entry->d_alias, &inode->i_dentry);
+do_negative:
 	entry->d_inode = inode;
-	fsnotify_d_instantiate(entry, inode);
-	return NULL;
-}
-
-struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
-{
-	struct dentry *result;
-
-	BUG_ON(!list_empty(&entry->d_alias));
-
-	spin_lock(&dcache_lock);
-	result = __d_instantiate_unique(entry, inode);
 	spin_unlock(&dcache_lock);
-
-	if (!result) {
-		security_d_instantiate(entry, inode);
-		return NULL;
-	}
-
-	BUG_ON(!d_unhashed(result));
-	iput(inode);
-	return result;
+	security_d_instantiate(entry, inode);
+	return NULL;
 }
-
 EXPORT_SYMBOL(d_instantiate_unique);

 /**
@@ -1143,7 +980,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 		new = __d_find_alias(inode, 1);
 		if (new) {
 			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
-			fsnotify_d_instantiate(new, inode);
 			spin_unlock(&dcache_lock);
 			security_d_instantiate(new, inode);
 			d_rehash(dentry);
@@ -1153,7 +989,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 		/* d_instantiate takes dcache_lock, so we do it by hand */
 		list_add(&dentry->d_alias, &inode->i_dentry);
 		dentry->d_inode = inode;
-		fsnotify_d_instantiate(dentry, inode);
 		spin_unlock(&dcache_lock);
 		security_d_instantiate(dentry, inode);
 		d_rehash(dentry);
@@ -1266,32 +1101,6 @@ next:
 	return found;
 }

-/**
- * d_hash_and_lookup - hash the qstr then search for a dentry
- * @dir: Directory to search in
- * @name: qstr of name we wish to find
- *
- * On hash failure or on lookup failure NULL is returned.
- */
-struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
-{
-	struct dentry *dentry = NULL;
-
-	/*
-	 * Check for a fs-specific hash function. Note that we must
-	 * calculate the standard hash first, as the d_op->d_hash()
-	 * routine may choose to leave the hash value unchanged.
-	 */
-	name->hash = full_name_hash(name->name, name->len);
-	if (dir->d_op && dir->d_op->d_hash) {
-		if (dir->d_op->d_hash(dir, name) < 0)
-			goto out;
-	}
-	dentry = d_lookup(dir, name);
-out:
-	return dentry;
-}
-
 /**
  * d_validate - verify dentry provided from insecure source
  * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
@@ -1366,9 +1175,6 @@ void d_delete(struct dentry * dentry)
 	if (atomic_read(&dentry->d_count) == 1) {
 		dentry_iput(dentry);
 		fsnotify_nameremove(dentry, isdir);
-
-		/* remove this and other inotify debug checks after 2.6.18 */
-		dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
 		return;
 	}

@@ -1388,11 +1194,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_head *list)
 	hlist_add_head_rcu(&entry->d_hash, list);
 }

-static void _d_rehash(struct dentry * entry)
-{
-	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
-}
-
 /**
  * d_rehash - add an entry back to the hash
  * @entry: dentry to add to the hash
@@ -1402,9 +1203,11 @@ static void _d_rehash(struct dentry * entry)

 void d_rehash(struct dentry * entry)
 {
+	struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
+
 	spin_lock(&dcache_lock);
 	spin_lock(&entry->d_lock);
-	_d_rehash(entry);
+	__d_rehash(entry, list);
 	spin_unlock(&entry->d_lock);
 	spin_unlock(&dcache_lock);
 }
@@ -1495,10 +1298,10 @@ void d_move(struct dentry * dentry, struct dentry * target)
 	 */
 	if (target < dentry) {
 		spin_lock(&target->d_lock);
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		spin_lock(&dentry->d_lock);
 	} else {
 		spin_lock(&dentry->d_lock);
-		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
+		spin_lock(&target->d_lock);
 	}

 	/* Move the dentry to the target hash queue, if on different bucket */
@@ -1514,16 +1317,6 @@ already_unhashed:
 	/* Unhash the target: dput() will then get rid of it */
 	__d_drop(target);

-	/* flush any possible attributes */
-	if (dentry->d_extra_attributes) {
-		kfree(dentry->d_extra_attributes);
-		dentry->d_extra_attributes = NULL;
-	}
-	if (target->d_extra_attributes) {
-		kfree(target->d_extra_attributes);
-		target->d_extra_attributes = NULL;
-	}
-
 	list_del(&dentry->d_u.d_child);
 	list_del(&target->d_u.d_child);

@@ -1546,126 +1339,11 @@ already_unhashed:
 		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

 	spin_unlock(&target->d_lock);
-	fsnotify_d_move(dentry);
 	spin_unlock(&dentry->d_lock);
 	write_sequnlock(&rename_lock);
 	spin_unlock(&dcache_lock);
 }

-/*
- * Prepare an anonymous dentry for life in the superblock's dentry tree as a
- * named dentry in place of the dentry to be replaced.
- */
-static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
-{
-	struct dentry *dparent, *aparent;
-
-	switch_names(dentry, anon);
-	do_switch(dentry->d_name.len, anon->d_name.len);
-	do_switch(dentry->d_name.hash, anon->d_name.hash);
-
-	dparent = dentry->d_parent;
-	aparent = anon->d_parent;
-
-	dentry->d_parent = (aparent == anon) ? dentry : aparent;
-	list_del(&dentry->d_u.d_child);
-	if (!IS_ROOT(dentry))
-		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
-	else
-		INIT_LIST_HEAD(&dentry->d_u.d_child);
-
-	anon->d_parent = (dparent == dentry) ? anon : dparent;
-	list_del(&anon->d_u.d_child);
-	if (!IS_ROOT(anon))
-		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
-	else
-		INIT_LIST_HEAD(&anon->d_u.d_child);
-
-	anon->d_flags &= ~DCACHE_DISCONNECTED;
-}
-
-/**
- * d_materialise_unique - introduce an inode into the tree
- * @dentry: candidate dentry
- * @inode: inode to bind to the dentry, to which aliases may be attached
- *
- * Introduces an dentry into the tree, substituting an extant disconnected
- * root directory alias in its place if there is one
- */
-struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
-{
-	struct dentry *alias, *actual;
-
-	BUG_ON(!d_unhashed(dentry));
-
-	spin_lock(&dcache_lock);
-
-	if (!inode) {
-		actual = dentry;
-		dentry->d_inode = NULL;
-		goto found_lock;
-	}
-
-	/* See if a disconnected directory already exists as an anonymous root
-	 * that we should splice into the tree instead */
-	if (S_ISDIR(inode->i_mode) && (alias = __d_find_alias(inode, 1))) {
-		spin_lock(&alias->d_lock);
-
-		/* Is this a mountpoint that we could splice into our tree? */
-		if (IS_ROOT(alias))
-			goto connect_mountpoint;
-
-		if (alias->d_name.len == dentry->d_name.len &&
-		    alias->d_parent == dentry->d_parent &&
-		    memcmp(alias->d_name.name,
-			   dentry->d_name.name,
-			   dentry->d_name.len) == 0)
-			goto replace_with_alias;
-
-		spin_unlock(&alias->d_lock);
-
-		/* Doh! Seem to be aliasing directories for some reason... */
-		dput(alias);
-	}
-
-	/* Add a unique reference */
-	actual = __d_instantiate_unique(dentry, inode);
-	if (!actual)
-		actual = dentry;
-	else if (unlikely(!d_unhashed(actual)))
-		goto shouldnt_be_hashed;
-
-found_lock:
-	spin_lock(&actual->d_lock);
-found:
-	_d_rehash(actual);
-	spin_unlock(&actual->d_lock);
-	spin_unlock(&dcache_lock);
-
-	if (actual == dentry) {
-		security_d_instantiate(dentry, inode);
-		return NULL;
-	}
-
-	iput(inode);
-	return actual;
-
-	/* Convert the anonymous/root alias into an ordinary dentry */
-connect_mountpoint:
-	__d_materialise_dentry(dentry, alias);
-
-	/* Replace the candidate dentry with the alias in the tree */
-replace_with_alias:
-	__d_drop(alias);
-	actual = alias;
-	goto found;
-
-shouldnt_be_hashed:
-	spin_unlock(&dcache_lock);
-	BUG();
-	goto shouldnt_be_hashed;
-}
-
 /**
  * d_path - return the path of a dentry
  * @dentry: dentry to report
  * @vfsmnt: vfsmnt to which the dentry belongs
  * @root: root vfsmnt/dentry (may be modified by this function)
  * @rootmnt: vfsmnt to which the root dentry belongs
  * @buffer: buffer to return value in
  * @buflen: buffer length
  *
  * Convert a dentry into an ASCII path name. If the entry has been deleted
  * the string " (deleted)" is appended. Note that this is ambiguous.
  *
  * Returns the buffer or an error code if the path was too long.
  *
  * "buflen" should be positive. Caller holds the dcache_lock.
  */
-char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
 			struct dentry *root, struct vfsmount *rootmnt,
 			char *buffer, int buflen)
 {
@@ -1750,8 +1428,6 @@ Elong:
 	return ERR_PTR(-ENAMETOOLONG);
 }

-EXPORT_SYMBOL_GPL(__d_path);
-
 /* write full pathname into buffer and return start of pathname */
 char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
 				char *buf, int buflen)
@@ -1937,12 +1613,26 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
 	struct dentry * dentry;
 	ino_t ino = 0;

-	dentry = d_hash_and_lookup(dir, name);
-	if (dentry) {
+	/*
+	 * Check for a fs-specific hash function. Note that we must
+	 * calculate the standard hash first, as the d_op->d_hash()
+	 * routine may choose to leave the hash value unchanged.
+	 */
+	name->hash = full_name_hash(name->name, name->len);
+	if (dir->d_op && dir->d_op->d_hash)
+	{
+		if (dir->d_op->d_hash(dir, name) != 0)
+			goto out;
+	}
+
+	dentry = d_lookup(dir, name);
+	if (dentry)
+	{
 		if (dentry->d_inode)
 			ino = dentry->d_inode->i_ino;
 		dput(dentry);
 	}
+out:
 	return ino;
 }

@@ -1980,23 +1670,6 @@ static void __init dcache_init_early(void)
 		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
 }

-void flush_dentry_attributes (void)
-{
-	struct hlist_node *tmp;
-	struct dentry *dentry;
-	int i;
-
-	spin_lock(&dcache_lock);
-	for (i = 0; i <= d_hash_mask; i++)
-		hlist_for_each_entry(dentry, tmp, dentry_hashtable+i, d_hash) {
-			kfree(dentry->d_extra_attributes);
-			dentry->d_extra_attributes = NULL;
-		}
-	spin_unlock(&dcache_lock);
-}
-
-EXPORT_SYMBOL_GPL(flush_dentry_attributes);
-
 static void __init dcache_init(unsigned long mempages)
 {
 	int loop;
@@ -2009,8 +1682,7 @@ static void __init dcache_init(unsigned long mempages)
 	dentry_cache = kmem_cache_create("dentry_cache",
 					 sizeof(struct dentry),
 					 0,
-					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-					 SLAB_MEM_SPREAD),
+					 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
 					 NULL, NULL);

 	set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
@@ -2034,10 +1706,10 @@ static void __init dcache_init(unsigned long mempages)
 }

 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep __read_mostly;
+kmem_cache_t *names_cachep;

 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep __read_mostly;
+kmem_cache_t *filp_cachep;

 EXPORT_SYMBOL(d_genocide);

@@ -2083,7 +1755,6 @@ EXPORT_SYMBOL(d_instantiate);
 EXPORT_SYMBOL(d_invalidate);
 EXPORT_SYMBOL(d_lookup);
 EXPORT_SYMBOL(d_move);
-EXPORT_SYMBOL_GPL(d_materialise_unique);
 EXPORT_SYMBOL(d_path);
 EXPORT_SYMBOL(d_prune_aliases);
 EXPORT_SYMBOL(d_rehash);