4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
10 * Notes on the allocation strategy:
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
17 #include <linux/config.h>
18 #include <linux/string.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/smp_lock.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/module.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
35 /* #define DCACHE_DEBUG 1 */
37 int sysctl_vfs_cache_pressure = 100;
39 spinlock_t dcache_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
40 seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
42 EXPORT_SYMBOL(dcache_lock);
44 static kmem_cache_t *dentry_cache;
46 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
49 * This is the single most critical data structure when it comes
50 * to the dcache: the hashtable for lookups. Somebody should try
51 * to make this good - I've just made it work.
53 * This hash-function tries to avoid losing too many bits of hash
54 * information, yet avoid using a prime hash-size or similar.
56 #define D_HASHBITS d_hash_shift
57 #define D_HASHMASK d_hash_mask
59 static unsigned int d_hash_mask;
60 static unsigned int d_hash_shift;
61 static struct hlist_head *dentry_hashtable;
62 static LIST_HEAD(dentry_unused);
64 static void prune_dcache(int count);
67 /* Statistics gathering. */
68 struct dentry_stat_t dentry_stat = {
72 static void d_callback(struct rcu_head *head)
74 struct dentry * dentry = container_of(head, struct dentry, d_rcu);
76 if (dname_external(dentry))
77 kfree(dentry->d_name.name);
78 kmem_cache_free(dentry_cache, dentry);
82 * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry inside dcache_lock.
85 static void d_free(struct dentry *dentry)
87 if (dentry->d_op && dentry->d_op->d_release)
88 dentry->d_op->d_release(dentry);
89 if (dentry->d_extra_attributes) {
90 kfree(dentry->d_extra_attributes);
91 dentry->d_extra_attributes = NULL;
93 call_rcu(&dentry->d_rcu, d_callback);
97 * Release the dentry's inode, using the filesystem
98 * d_iput() operation if defined.
99 * Called with dcache_lock and per dentry lock held, drops both.
101 static inline void dentry_iput(struct dentry * dentry)
103 struct inode *inode = dentry->d_inode;
105 dentry->d_inode = NULL;
106 list_del_init(&dentry->d_alias);
107 spin_unlock(&dentry->d_lock);
108 spin_unlock(&dcache_lock);
109 if (dentry->d_op && dentry->d_op->d_iput)
110 dentry->d_op->d_iput(dentry, inode);
114 spin_unlock(&dentry->d_lock);
115 spin_unlock(&dcache_lock);
122 * This is complicated by the fact that we do not want to put
123 * dentries that are no longer on any hash chain on the unused
124 * list: we'd much rather just get rid of them immediately.
126 * However, that implies that we have to traverse the dentry
127 * tree upwards to the parents which might _also_ now be
128 * scheduled for deletion (it may have been only waiting for
129 * its last child to go away).
131 * This tail recursion is done by hand as we don't want to depend
132 * on the compiler to always get this right (gcc generally doesn't).
133 * Real recursion would eat up our stack space.
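/*
 * A minimal, self-contained sketch (not part of the original code) of
 * the hand-rolled tail recursion described above: instead of recursing
 * into the parent, the loop makes the parent its next target, keeping
 * stack usage constant on arbitrarily deep chains. All names here are
 * illustrative.
 */
struct toy_node {
	int refcount;
	struct toy_node *parent;	/* a root points to itself */
};

static void toy_put(struct toy_node *n)
{
	while (n) {
		struct toy_node *parent;

		if (--n->refcount > 0)
			return;
		parent = (n->parent == n) ? NULL : n->parent;
		/* release n's resources here; nothing can reach it now */
		n = parent;		/* "tail recursion" done by hand */
	}
}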
137 * dput - release a dentry
138 * @dentry: dentry to release
140 * Release a dentry. This will drop the usage count and if appropriate
141 * call the dentry unlink method as well as removing it from the queues and
142 * releasing its resources. If the parent dentries were scheduled for release
143 * they too may now get deleted.
145 * no dcache lock, please.
148 void dput(struct dentry *dentry)
154 if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
157 spin_lock(&dentry->d_lock);
158 if (atomic_read(&dentry->d_count)) {
159 spin_unlock(&dentry->d_lock);
160 spin_unlock(&dcache_lock);
165 * AV: ->d_delete() is _NOT_ allowed to block now.
167 if (dentry->d_op && dentry->d_op->d_delete) {
168 if (dentry->d_op->d_delete(dentry))
171 /* Unreachable? Get rid of it */
172 if (d_unhashed(dentry))
174 if (list_empty(&dentry->d_lru)) {
175 dentry->d_flags |= DCACHE_REFERENCED;
176 list_add(&dentry->d_lru, &dentry_unused);
177 dentry_stat.nr_unused++;
179 spin_unlock(&dentry->d_lock);
180 spin_unlock(&dcache_lock);
187 struct dentry *parent;
189 /* If dentry was on the d_lru list,
190 * delete it from there.
192 if (!list_empty(&dentry->d_lru)) {
193 list_del(&dentry->d_lru);
194 dentry_stat.nr_unused--;
196 list_del(&dentry->d_child);
197 dentry_stat.nr_dentry--; /* For d_free, below */
198 /* drops the locks; at that point nobody can reach this dentry */
200 parent = dentry->d_parent;
202 if (dentry == parent)
210 * d_invalidate - invalidate a dentry
211 * @dentry: dentry to invalidate
213 * Try to invalidate the dentry if it turns out to be
214 * possible. If there are other dentries that can be
215 * reached through this one we can't delete it and we
216 * return -EBUSY. On success we return 0.
221 int d_invalidate(struct dentry * dentry)
224 * If it's already been dropped, return OK.
226 spin_lock(&dcache_lock);
227 if (d_unhashed(dentry)) {
228 spin_unlock(&dcache_lock);
232 * Check whether to do a partial shrink_dcache
233 * to get rid of unused child entries.
235 if (!list_empty(&dentry->d_subdirs)) {
236 spin_unlock(&dcache_lock);
237 shrink_dcache_parent(dentry);
238 spin_lock(&dcache_lock);
242 * Somebody else still using it?
244 * If it's a directory, we can't drop it
245 * for fear of somebody re-populating it
246 * with children (even though dropping it
247 * would make it unreachable from the root,
248 * we might still populate it if it was a
249 * working directory or similar).
251 spin_lock(&dentry->d_lock);
252 if (atomic_read(&dentry->d_count) > 1) {
253 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
254 spin_unlock(&dentry->d_lock);
255 spin_unlock(&dcache_lock);
261 spin_unlock(&dentry->d_lock);
262 spin_unlock(&dcache_lock);
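/*
 * Usage sketch (illustrative, not from the original file): a caller
 * that tries to drop a dentry it believes is stale. -EBUSY means the
 * dentry is still pinned by users or, for directories, by children.
 */
static int example_drop_stale(struct dentry *dentry)
{
	return d_invalidate(dentry);	/* 0 on success, -EBUSY if busy */
}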
266 /* This should be called _only_ with dcache_lock held */
268 static inline struct dentry * __dget_locked(struct dentry *dentry)
270 atomic_inc(&dentry->d_count);
271 if (!list_empty(&dentry->d_lru)) {
272 dentry_stat.nr_unused--;
273 list_del_init(&dentry->d_lru);
278 struct dentry * dget_locked(struct dentry *dentry)
280 return __dget_locked(dentry);
284 * d_find_alias - grab a hashed alias of inode
285 * @inode: inode in question
287 * If inode has a hashed alias - acquire the reference to alias and
288 * return it. Otherwise return NULL. Notice that if inode is a directory
289 * there can be only one alias and it can be unhashed only if it has no children.
292 * If the inode has a DCACHE_DISCONNECTED alias, then prefer
293 * any other hashed alias over that one.
296 struct dentry * d_find_alias(struct inode *inode)
298 struct list_head *head, *next, *tmp;
299 struct dentry *alias, *discon_alias=NULL;
301 spin_lock(&dcache_lock);
302 head = &inode->i_dentry;
303 next = inode->i_dentry.next;
304 while (next != head) {
308 alias = list_entry(tmp, struct dentry, d_alias);
309 if (!d_unhashed(alias)) {
310 if (alias->d_flags & DCACHE_DISCONNECTED)
311 discon_alias = alias;
313 __dget_locked(alias);
314 spin_unlock(&dcache_lock);
320 __dget_locked(discon_alias);
321 spin_unlock(&dcache_lock);
326 * Try to kill dentries associated with this inode.
327 * WARNING: you must own a reference to inode.
329 void d_prune_aliases(struct inode *inode)
331 struct list_head *tmp, *head = &inode->i_dentry;
333 spin_lock(&dcache_lock);
335 while ((tmp = tmp->next) != head) {
336 struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
337 if (!atomic_read(&dentry->d_count)) {
338 __dget_locked(dentry);
340 spin_unlock(&dcache_lock);
345 spin_unlock(&dcache_lock);
349 * Throw away a dentry - free the inode, dput the parent.
350 * This requires that the LRU list has already been
352 * Called with dcache_lock held; drops it and then reacquires it.
354 static inline void prune_one_dentry(struct dentry * dentry)
356 struct dentry * parent;
359 list_del(&dentry->d_child);
360 dentry_stat.nr_dentry--; /* For d_free, below */
362 parent = dentry->d_parent;
364 if (parent != dentry)
366 spin_lock(&dcache_lock);
370 * prune_dcache - shrink the dcache
371 * @count: number of entries to try and free
373 * Shrink the dcache. This is done when we need
374 * more memory, or simply when we need to unmount
375 * something (at which point we need to unuse
378 * This function may fail to free any resources if
379 * all the dentries are in use.
382 static void prune_dcache(int count)
384 spin_lock(&dcache_lock);
385 for (; count ; count--) {
386 struct dentry *dentry;
387 struct list_head *tmp;
389 cond_resched_lock(&dcache_lock);
391 tmp = dentry_unused.prev;
392 if (tmp == &dentry_unused)
395 prefetch(dentry_unused.prev);
396 dentry_stat.nr_unused--;
397 dentry = list_entry(tmp, struct dentry, d_lru);
399 spin_lock(&dentry->d_lock);
401 * We found an in-use dentry which was not removed from
402 * dentry_unused because of laziness during lookup. Do not free
403 * it - just keep it off the dentry_unused list.
405 if (atomic_read(&dentry->d_count)) {
406 spin_unlock(&dentry->d_lock);
409 /* If the dentry was recently referenced, don't free it. */
410 if (dentry->d_flags & DCACHE_REFERENCED) {
411 dentry->d_flags &= ~DCACHE_REFERENCED;
412 list_add(&dentry->d_lru, &dentry_unused);
413 dentry_stat.nr_unused++;
414 spin_unlock(&dentry->d_lock);
417 prune_one_dentry(dentry);
419 spin_unlock(&dcache_lock);
423 * Shrink the dcache for the specified super block.
424 * This allows us to unmount a device without disturbing
425 * the dcache for the other devices.
427 * This implementation makes just two traversals of the
428 * unused list. On the first pass we move the selected
429 * dentries to the most recent end, and on the second
430 * pass we free them. The second pass must restart after
431 * each dput(), but since the target dentries are all at
432 * the end, it's really just a single traversal.
436 * shrink_dcache_sb - shrink dcache for a superblock
439 * Shrink the dcache for the specified super block. This
440 * is used to free the dcache before unmounting a file
444 void shrink_dcache_sb(struct super_block * sb)
446 struct list_head *tmp, *next;
447 struct dentry *dentry;
450 * Pass one ... move the dentries for the specified
451 * superblock to the most recent end of the unused list.
453 spin_lock(&dcache_lock);
454 next = dentry_unused.next;
455 while (next != &dentry_unused) {
458 dentry = list_entry(tmp, struct dentry, d_lru);
459 if (dentry->d_sb != sb)
462 list_add(tmp, &dentry_unused);
466 * Pass two ... free the dentries for this superblock.
469 next = dentry_unused.next;
470 while (next != &dentry_unused) {
473 dentry = list_entry(tmp, struct dentry, d_lru);
474 if (dentry->d_sb != sb)
476 dentry_stat.nr_unused--;
478 spin_lock(&dentry->d_lock);
479 if (atomic_read(&dentry->d_count)) {
480 spin_unlock(&dentry->d_lock);
483 prune_one_dentry(dentry);
486 spin_unlock(&dcache_lock);
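/*
 * Usage sketch (illustrative): unmount-style teardown empties both the
 * named dentries and the anonymous (filehandle) ones for the dying
 * superblock, in the spirit of generic_shutdown_super().
 */
static void example_shutdown_dcache(struct super_block *sb)
{
	shrink_dcache_sb(sb);			/* named dentries */
	shrink_dcache_anon(&sb->s_anon);	/* disconnected NFS dentries */
}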
490 * Search for at least 1 mount point in the dentry's subdirs.
491 * We descend to the next level whenever the d_subdirs
492 * list is non-empty and continue searching.
496 * have_submounts - check for mounts over a dentry
497 * @parent: dentry to check.
499 * Return true if the parent or its subdirectories contain a mount point.
503 int have_submounts(struct dentry *parent)
505 struct dentry *this_parent = parent;
506 struct list_head *next;
508 spin_lock(&dcache_lock);
509 if (d_mountpoint(parent))
512 next = this_parent->d_subdirs.next;
514 while (next != &this_parent->d_subdirs) {
515 struct list_head *tmp = next;
516 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
518 /* Have we found a mount point ? */
519 if (d_mountpoint(dentry))
521 if (!list_empty(&dentry->d_subdirs)) {
522 this_parent = dentry;
527 * All done at this level ... ascend and resume the search.
529 if (this_parent != parent) {
530 next = this_parent->d_child.next;
531 this_parent = this_parent->d_parent;
534 spin_unlock(&dcache_lock);
535 return 0; /* No mount points found in tree */
537 spin_unlock(&dcache_lock);
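/*
 * Usage sketch (illustrative): an automounter-style expiry check; a
 * subtree with anything mounted inside it must not be expired.
 */
static int example_may_expire(struct dentry *tree)
{
	return !have_submounts(tree);
}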
542 * Search the dentry child list for the specified parent,
543 * and move any unused dentries to the end of the unused
544 * list for prune_dcache(). We descend to the next level
545 * whenever the d_subdirs list is non-empty and continue
548 static int select_parent(struct dentry * parent)
550 struct dentry *this_parent = parent;
551 struct list_head *next;
554 spin_lock(&dcache_lock);
556 next = this_parent->d_subdirs.next;
558 while (next != &this_parent->d_subdirs) {
559 struct list_head *tmp = next;
560 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
563 if (!list_empty(&dentry->d_lru)) {
564 dentry_stat.nr_unused--;
565 list_del_init(&dentry->d_lru);
568 * move only zero ref count dentries to the end
569 * of the unused list for prune_dcache
571 if (!atomic_read(&dentry->d_count)) {
572 list_add(&dentry->d_lru, dentry_unused.prev);
573 dentry_stat.nr_unused++;
577 * Descend a level if the d_subdirs list is non-empty.
579 if (!list_empty(&dentry->d_subdirs)) {
580 this_parent = dentry;
582 printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
583 dentry->d_parent->d_name.name, dentry->d_name.name, found);
589 * All done at this level ... ascend and resume the search.
591 if (this_parent != parent) {
592 next = this_parent->d_child.next;
593 this_parent = this_parent->d_parent;
595 printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
596 this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
600 spin_unlock(&dcache_lock);
605 * shrink_dcache_parent - prune dcache
606 * @parent: parent of entries to prune
608 * Prune the dcache to remove unused children of the parent dentry.
611 void shrink_dcache_parent(struct dentry * parent)
615 while ((found = select_parent(parent)) != 0)
620 * shrink_dcache_anon - further prune the cache
621 * @head: head of d_hash list of dentries to prune
623 * Prune the dentries that are anonymous
625 * Parsing the d_hash list does not need read_barrier_depends() as it is
626 * done under dcache_lock.
629 void shrink_dcache_anon(struct hlist_head *head)
631 struct hlist_node *lp;
635 spin_lock(&dcache_lock);
636 hlist_for_each(lp, head) {
637 struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
638 if (!list_empty(&this->d_lru)) {
639 dentry_stat.nr_unused--;
640 list_del_init(&this->d_lru);
644 * move only zero ref count dentries to the end
645 * of the unused list for prune_dcache
647 if (!atomic_read(&this->d_count)) {
648 list_add_tail(&this->d_lru, &dentry_unused);
649 dentry_stat.nr_unused++;
653 spin_unlock(&dcache_lock);
659 * Scan `nr' dentries and return the number which remain.
661 * We need to avoid reentering the filesystem if the caller is performing a
662 * GFP_NOFS allocation attempt. One example deadlock is:
664 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
665 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
666 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
668 * In this case we return -1 to tell the caller that we bailed.
670 static int shrink_dcache_memory(int nr, unsigned int gfp_mask)
673 if (!(gfp_mask & __GFP_FS))
677 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
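/*
 * Worked example (illustrative numbers): with 30000 unused dentries,
 * the default sysctl_vfs_cache_pressure of 100 reports
 * (30000 / 100) * 100 = 30000 reclaimable objects to the VM. Setting
 * the sysctl to 50 halves that to 15000, and 0 exempts the dcache
 * from this shrinker entirely.
 */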
681 * d_alloc - allocate a dcache entry
682 * @parent: parent of entry to allocate
683 * @name: qstr of the name
685 * Allocates a dentry. It returns %NULL if there is insufficient memory
686 * available. On a success the dentry is returned. The name passed in is
687 * copied and the copy passed in may be reused after this call.
690 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
692 struct dentry *dentry;
695 #define DENTRY_UNUSED_THRESHOLD 30000
696 #define DENTRY_BATCH_COUNT 32
698 if (dentry_stat.nr_unused > DENTRY_UNUSED_THRESHOLD) {
700 spin_lock(&dcache_lock);
701 if (dentry_stat.nr_unused < DENTRY_UNUSED_THRESHOLD)
703 spin_unlock(&dcache_lock);
705 prune_dcache(DENTRY_BATCH_COUNT);
708 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
712 if (name->len > DNAME_INLINE_LEN-1) {
713 dname = kmalloc(name->len + 1, GFP_KERNEL);
715 kmem_cache_free(dentry_cache, dentry);
719 dname = dentry->d_iname;
721 dentry->d_name.name = dname;
723 dentry->d_name.len = name->len;
724 dentry->d_name.hash = name->hash;
725 memcpy(dname, name->name, name->len);
726 dname[name->len] = 0;
728 atomic_set(&dentry->d_count, 1);
729 dentry->d_flags = DCACHE_UNHASHED;
730 dentry->d_lock = SPIN_LOCK_UNLOCKED;
731 dentry->d_inode = NULL;
732 dentry->d_parent = NULL;
735 dentry->d_fsdata = NULL;
736 dentry->d_extra_attributes = NULL;
737 dentry->d_mounted = 0;
738 dentry->d_cookie = NULL;
739 dentry->d_bucket = NULL;
740 INIT_HLIST_NODE(&dentry->d_hash);
741 INIT_LIST_HEAD(&dentry->d_lru);
742 INIT_LIST_HEAD(&dentry->d_subdirs);
743 INIT_LIST_HEAD(&dentry->d_alias);
746 dentry->d_parent = dget(parent);
747 dentry->d_sb = parent->d_sb;
749 INIT_LIST_HEAD(&dentry->d_child);
752 spin_lock(&dcache_lock);
754 list_add(&dentry->d_child, &parent->d_subdirs);
755 dentry_stat.nr_dentry++;
756 spin_unlock(&dcache_lock);
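/*
 * Usage sketch (illustrative): building a child dentry by hand. Since
 * d_alloc() copies the name, a temporary qstr on the stack is fine;
 * the literal name here is an example only.
 */
static struct dentry *example_alloc_child(struct dentry *dir)
{
	struct qstr q;

	q.name = (const unsigned char *) "example";
	q.len = 7;
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(dir, &q);	/* NULL on allocation failure */
}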
762 * d_instantiate - fill in inode information for a dentry
763 * @entry: dentry to complete
764 * @inode: inode to attach to this dentry
766 * Fill in inode information in the entry.
768 * This turns negative dentries into productive full members of society.
771 * NOTE! This assumes that the inode count has been incremented
772 * (or otherwise set) by the caller to indicate that it is now
773 * in use by the dcache.
776 void d_instantiate(struct dentry *entry, struct inode * inode)
778 if (!list_empty(&entry->d_alias)) BUG();
779 spin_lock(&dcache_lock);
781 list_add(&entry->d_alias, &inode->i_dentry);
782 entry->d_inode = inode;
783 spin_unlock(&dcache_lock);
784 security_d_instantiate(entry, inode);
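/*
 * Usage sketch (illustrative): the tail of a typical filesystem
 * ->create() method. The reference on the freshly allocated inode is
 * handed over to the dcache, as the NOTE above requires.
 */
static int example_create_tail(struct inode *inode, struct dentry *dentry)
{
	if (!inode)
		return -ENOMEM;
	d_instantiate(dentry, inode);	/* dcache now owns this reference */
	return 0;
}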
788 * d_alloc_root - allocate root dentry
789 * @root_inode: inode to allocate the root for
791 * Allocate a root ("/") dentry for the inode given. The inode is
792 * instantiated and returned. %NULL is returned if there is insufficient
793 * memory or the inode passed is %NULL.
796 struct dentry * d_alloc_root(struct inode * root_inode)
798 struct dentry *res = NULL;
801 static const struct qstr name = { .name = "/", .len = 1 };
803 res = d_alloc(NULL, &name);
805 res->d_sb = root_inode->i_sb;
807 d_instantiate(res, root_inode);
813 static inline struct hlist_head *d_hash(struct dentry *parent,
816 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
817 hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
818 return dentry_hashtable + (hash & D_HASHMASK);
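/*
 * Usage sketch (illustrative): turning a raw name into its hash chain.
 * The qstr hash comes from full_name_hash() and is then folded with
 * the parent pointer by d_hash() above.
 */
static struct hlist_head *example_chain_for(struct dentry *parent,
					    const unsigned char *name,
					    unsigned int len)
{
	return d_hash(parent, full_name_hash(name, len));
}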
822 * d_alloc_anon - allocate an anonymous dentry
823 * @inode: inode to allocate the dentry for
825 * This is similar to d_alloc_root. It is used by filesystems when
826 * creating a dentry for a given inode, often in the process of
827 * mapping a filehandle to a dentry. The returned dentry may be
828 * anonymous, or may have a full name (if the inode was already
829 * in the cache). The file system may need to make further
830 * efforts to connect this dentry into the dcache properly.
832 * When called on a directory inode, we must ensure that
833 * the inode only ever has one dentry. If a dentry is
834 * found, that is returned instead of allocating a new one.
836 * On successful return, the reference to the inode has been transferred
837 * to the dentry. If %NULL is returned (indicating kmalloc failure),
838 * the reference on the inode has not been released.
841 struct dentry * d_alloc_anon(struct inode *inode)
843 static const struct qstr anonstring = { .name = "" };
847 if ((res = d_find_alias(inode))) {
852 tmp = d_alloc(NULL, &anonstring);
856 tmp->d_parent = tmp; /* make sure dput doesn't croak */
858 spin_lock(&dcache_lock);
859 if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
860 /* A directory can only have one dentry.
861 * This (now) has one, so use it.
863 res = list_entry(inode->i_dentry.next, struct dentry, d_alias);
866 /* attach a disconnected dentry */
870 spin_lock(&res->d_lock);
871 res->d_sb = inode->i_sb;
873 res->d_inode = inode;
876 * Set d_bucket to an "impossible" bucket address so
877 * that d_move() doesn't get a false positive
879 res->d_bucket = NULL;
880 res->d_flags |= DCACHE_DISCONNECTED;
881 res->d_flags &= ~DCACHE_UNHASHED;
882 list_add(&res->d_alias, &inode->i_dentry);
883 hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
884 spin_unlock(&res->d_lock);
886 inode = NULL; /* don't drop reference */
888 spin_unlock(&dcache_lock);
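/*
 * Usage sketch (illustrative): the shape of an export_operations-style
 * filehandle-to-dentry helper. example_iget() is hypothetical and
 * stands in for the filesystem's own inode lookup.
 */
static struct inode *example_iget(struct super_block *sb, unsigned long ino);

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   unsigned long ino)
{
	struct inode *inode = example_iget(sb, ino);
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	res = d_alloc_anon(inode);
	if (!res) {
		iput(inode);	/* on NULL, the inode ref was not consumed */
		return ERR_PTR(-ENOMEM);
	}
	return res;		/* may be anonymous or a known alias */
}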
899 * d_splice_alias - splice a disconnected dentry into the tree if one exists
900 * @inode: the inode which may have a disconnected dentry
901 * @dentry: a negative dentry which we want to point to the inode.
903 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
904 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
905 * and return it, else simply d_add the inode to the dentry and return NULL.
907 * This is needed in the lookup routine of any filesystem that is exportable
908 * (via knfsd) so that we can build dcache paths to directories effectively.
910 * If a dentry was found and moved, then it is returned. Otherwise NULL
911 * is returned. This matches the expected return value of ->lookup.
914 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
916 struct dentry *new = NULL;
918 if (inode && S_ISDIR(inode->i_mode)) {
919 spin_lock(&dcache_lock);
920 if (!list_empty(&inode->i_dentry)) {
921 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
923 spin_unlock(&dcache_lock);
924 security_d_instantiate(new, inode);
929 /* d_instantiate takes dcache_lock, so we do it by hand */
930 list_add(&dentry->d_alias, &inode->i_dentry);
931 dentry->d_inode = inode;
932 spin_unlock(&dcache_lock);
933 security_d_instantiate(dentry, inode);
937 d_add(dentry, inode);
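/*
 * Usage sketch (illustrative): an exportable filesystem's ->lookup()
 * just returns whatever d_splice_alias() decides. example_lookup_ino()
 * is hypothetical; a miss yields a NULL inode and a negative dentry.
 */
static struct inode *example_lookup_ino(struct inode *dir, struct qstr *name);

static struct dentry *example_fs_lookup(struct inode *dir,
					struct dentry *dentry,
					struct nameidata *nd)
{
	struct inode *inode = example_lookup_ino(dir, &dentry->d_name);

	return d_splice_alias(inode, dentry);
}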
943 * d_lookup - search for a dentry
944 * @parent: parent dentry
945 * @name: qstr of name we wish to find
947 * Searches the children of the parent dentry for the name in question. If
948 * the dentry is found its reference count is incremented and the dentry
949 * is returned. The caller must use dput to free the entry when it has
950 * finished using it. %NULL is returned on failure.
952 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
953 * Memory barriers are used while updating and doing lockless traversal.
954 * To avoid races with d_move while rename is happening, d_lock is used.
956 * Overflows in memcmp() while a d_move() is in progress are avoided by
957 * keeping the length and name pointer together in one structure pointed to by d_qstr.
959 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
960 * lookup is going on.
962 * dentry_unused list is not updated even if lookup finds the required dentry
963 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
964 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock acquisition.
967 * d_lookup() is protected against concurrent renames in some unrelated
968 * directory using the seqlock_t rename_lock.
971 struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
973 struct dentry * dentry = NULL;
977 seq = read_seqbegin(&rename_lock);
978 dentry = __d_lookup(parent, name);
981 } while (read_seqretry(&rename_lock, seq));
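/*
 * Usage sketch (illustrative): the reference taken by a successful
 * d_lookup() must be balanced with dput(). The qstr's hash is assumed
 * to have been precomputed with full_name_hash().
 */
static void example_lookup_and_release(struct dentry *parent,
				       struct qstr *name)
{
	struct dentry *found = d_lookup(parent, name);

	if (found) {
		/* ... inspect found->d_inode, found->d_name, ... */
		dput(found);
	}
}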
985 struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
987 unsigned int len = name->len;
988 unsigned int hash = name->hash;
989 const unsigned char *str = name->name;
990 struct hlist_head *head = d_hash(parent,hash);
991 struct dentry *found = NULL;
992 struct hlist_node *node;
996 hlist_for_each (node, head) {
997 struct dentry *dentry;
1000 smp_read_barrier_depends();
1001 dentry = hlist_entry(node, struct dentry, d_hash);
1005 if (dentry->d_name.hash != hash)
1007 if (dentry->d_parent != parent)
1010 spin_lock(&dentry->d_lock);
1013 * If lookup ends up in a different bucket due to a concurrent rename, fail it.
1016 if (unlikely(dentry->d_bucket != head))
1020 * Recheck the dentry after taking the lock - d_move may have
1021 * changed things. Don't bother checking the hash because we're
1022 * about to compare the whole name anyway.
1024 if (dentry->d_parent != parent)
1027 qstr = &dentry->d_name;
1028 smp_read_barrier_depends();
1029 if (parent->d_op && parent->d_op->d_compare) {
1030 if (parent->d_op->d_compare(parent, qstr, name))
1033 if (qstr->len != len)
1035 if (memcmp(qstr->name, str, len))
1039 if (!d_unhashed(dentry)) {
1040 atomic_inc(&dentry->d_count);
1044 spin_unlock(&dentry->d_lock);
1047 spin_unlock(&dentry->d_lock);
1055 * d_validate - verify dentry provided from insecure source
1056 * @dentry: The dentry alleged to be valid child of @dparent
1057 * @dparent: The parent dentry (known to be valid)
1061 * An insecure source has sent us a dentry, here we verify it and dget() it.
1062 * This is used by ncpfs in its readdir implementation.
1063 * Zero is returned if the dentry is invalid.
1066 int d_validate(struct dentry *dentry, struct dentry *dparent)
1068 struct hlist_head *base;
1069 struct hlist_node *lhp;
1071 /* Check whether the ptr might be valid at all. */
1072 if (!kmem_ptr_validate(dentry_cache, dentry))
1075 if (dentry->d_parent != dparent)
1078 spin_lock(&dcache_lock);
1079 base = d_hash(dparent, dentry->d_name.hash);
1080 hlist_for_each(lhp,base) {
1081 /* read_barrier_depends() not required for d_hash list
1082 * as it is parsed under dcache_lock
1084 if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
1085 __dget_locked(dentry);
1086 spin_unlock(&dcache_lock);
1090 spin_unlock(&dcache_lock);
1096 * When a file is deleted, we have two options:
1097 * - turn this dentry into a negative dentry
1098 * - unhash this dentry and free it.
1100 * Usually, we want to just turn this into
1101 * a negative dentry, but if anybody else is
1102 * currently using the dentry or the inode
1103 * we can't do that and we fall back on removing
1104 * it from the hash queues and waiting for
1105 * it to be deleted later when it has no users
1109 * d_delete - delete a dentry
1110 * @dentry: The dentry to delete
1112 * Turn the dentry into a negative dentry if possible, otherwise
1113 * remove it from the hash queues so it can be deleted later
1116 void d_delete(struct dentry * dentry)
1119 * Are we the only user?
1121 spin_lock(&dcache_lock);
1122 spin_lock(&dentry->d_lock);
1123 if (atomic_read(&dentry->d_count) == 1) {
1124 dentry_iput(dentry);
1128 if (!d_unhashed(dentry))
1131 spin_unlock(&dentry->d_lock);
1132 spin_unlock(&dcache_lock);
1136 * d_rehash - add an entry back to the hash
1137 * @entry: dentry to add to the hash
1139 * Adds a dentry to the hash according to its name.
1142 void d_rehash(struct dentry * entry)
1144 struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
1146 spin_lock(&dcache_lock);
1147 spin_lock(&entry->d_lock);
1148 entry->d_flags &= ~DCACHE_UNHASHED;
1149 spin_unlock(&entry->d_lock);
1150 entry->d_bucket = list;
1151 hlist_add_head_rcu(&entry->d_hash, list);
1152 spin_unlock(&dcache_lock);
1155 #define do_switch(x,y) do { \
1156 __typeof__ (x) __tmp = x; \
1157 x = y; y = __tmp; } while (0)
1160 * When switching names, the actual string doesn't strictly have to
1161 * be preserved in the target - because we're dropping the target
1162 * anyway. As such, we can just do a simple memcpy() to copy over
1163 * the new name before we switch.
1165 * Note that we have to be a lot more careful about getting the hash
1166 * switched - we have to switch the hash value properly even if it
1167 * then no longer matches the actual (corrupted) string of the target.
1168 * The hash value has to match the hash queue that the dentry is on.
1170 static void switch_names(struct dentry *dentry, struct dentry *target)
1172 if (dname_external(target)) {
1173 if (dname_external(dentry)) {
1175 * Both external: swap the pointers
1177 do_switch(target->d_name.name, dentry->d_name.name);
1180 * dentry:internal, target:external. Steal target's
1181 * storage and make target internal.
1183 dentry->d_name.name = target->d_name.name;
1184 target->d_name.name = target->d_iname;
1187 if (dname_external(dentry)) {
1189 * dentry:external, target:internal. Give dentry's
1190 * storage to target and make dentry internal
1192 memcpy(dentry->d_iname, target->d_name.name,
1193 target->d_name.len + 1);
1194 target->d_name.name = dentry->d_name.name;
1195 dentry->d_name.name = dentry->d_iname;
1198 * Both are internal. Just copy target to dentry
1200 memcpy(dentry->d_iname, target->d_name.name,
1201 target->d_name.len + 1);
1207 * We cannibalize "target" when moving dentry on top of it,
1208 * because it's going to be thrown away anyway. We could be more
1209 * polite about it, though.
1211 * This forceful removal will result in ugly /proc output if
1212 * somebody holds a file open that got deleted due to a rename.
1213 * We could be nicer about the deleted file, and let it show
1214 * up under the name it got deleted rather than the name that deleted it.
1219 * d_move - move a dentry
1220 * @dentry: entry to move
1221 * @target: new dentry
1223 * Update the dcache to reflect the move of a file name. Negative
1224 * dcache entries should not be moved in this way.
1227 void d_move(struct dentry * dentry, struct dentry * target)
1229 if (!dentry->d_inode)
1230 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
1232 spin_lock(&dcache_lock);
1233 write_seqlock(&rename_lock);
1235 * XXXX: do we really need to take target->d_lock?
1237 if (target < dentry) {
1238 spin_lock(&target->d_lock);
1239 spin_lock(&dentry->d_lock);
1241 spin_lock(&dentry->d_lock);
1242 spin_lock(&target->d_lock);
1245 /* Move the dentry to the target hash queue, if on different bucket */
1246 if (dentry->d_flags & DCACHE_UNHASHED)
1247 goto already_unhashed;
1248 if (dentry->d_bucket != target->d_bucket) {
1249 hlist_del_rcu(&dentry->d_hash);
1251 dentry->d_bucket = target->d_bucket;
1252 hlist_add_head_rcu(&dentry->d_hash, target->d_bucket);
1253 dentry->d_flags &= ~DCACHE_UNHASHED;
1256 /* Unhash the target: dput() will then get rid of it */
1259 /* flush any possible attributes */
1260 if (dentry->d_extra_attributes) {
1261 kfree(dentry->d_extra_attributes);
1262 dentry->d_extra_attributes = NULL;
1264 if (target->d_extra_attributes) {
1265 kfree(target->d_extra_attributes);
1266 target->d_extra_attributes = NULL;
1269 list_del(&dentry->d_child);
1270 list_del(&target->d_child);
1272 /* Switch the names.. */
1273 switch_names(dentry, target);
1275 do_switch(dentry->d_name.len, target->d_name.len);
1276 do_switch(dentry->d_name.hash, target->d_name.hash);
1278 /* ... and switch the parents */
1279 if (IS_ROOT(dentry)) {
1280 dentry->d_parent = target->d_parent;
1281 target->d_parent = target;
1282 INIT_LIST_HEAD(&target->d_child);
1284 do_switch(dentry->d_parent, target->d_parent);
1286 /* And add them back to the (new) parent lists */
1287 list_add(&target->d_child, &target->d_parent->d_subdirs);
1290 list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
1291 spin_unlock(&target->d_lock);
1292 spin_unlock(&dentry->d_lock);
1293 write_sequnlock(&rename_lock);
1294 spin_unlock(&dcache_lock);
1298 * d_path - return the path of a dentry
1299 * @dentry: dentry to report
1300 * @vfsmnt: vfsmnt to which the dentry belongs
1301 * @root: root dentry
1302 * @rootmnt: vfsmnt to which the root dentry belongs
1303 * @buffer: buffer to return value in
1304 * @buflen: buffer length
1306 * Convert a dentry into an ASCII path name. If the entry has been deleted
1307 * the string " (deleted)" is appended. Note that this is ambiguous.
1309 * Returns the buffer or an error code if the path was too long.
1311 * "buflen" should be positive. Caller holds the dcache_lock.
1313 char * __d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
1314 struct dentry *root, struct vfsmount *rootmnt,
1315 char *buffer, int buflen)
1317 char * end = buffer+buflen;
1323 if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
1328 memcpy(end, " (deleted)", 10);
1338 struct dentry * parent;
1340 if (dentry == root && vfsmnt == rootmnt)
1342 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
1344 spin_lock(&vfsmount_lock);
1345 if (vfsmnt->mnt_parent == vfsmnt) {
1346 spin_unlock(&vfsmount_lock);
1349 dentry = vfsmnt->mnt_mountpoint;
1350 vfsmnt = vfsmnt->mnt_parent;
1351 spin_unlock(&vfsmount_lock);
1354 parent = dentry->d_parent;
1356 namelen = dentry->d_name.len;
1357 buflen -= namelen + 1;
1361 memcpy(end, dentry->d_name.name, namelen);
1370 namelen = dentry->d_name.len;
1374 retval -= namelen-1; /* hit the slash */
1375 memcpy(retval, dentry->d_name.name, namelen);
1378 return ERR_PTR(-ENAMETOOLONG);
1381 EXPORT_SYMBOL_GPL(__d_path);
1383 /* write full pathname into buffer and return start of pathname */
1384 char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
1385 char *buf, int buflen)
1388 struct vfsmount *rootmnt;
1389 struct dentry *root;
1391 read_lock(&current->fs->lock);
1392 rootmnt = mntget(current->fs->rootmnt);
1393 root = dget(current->fs->root);
1394 read_unlock(&current->fs->lock);
1395 spin_lock(&dcache_lock);
1396 res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
1397 spin_unlock(&dcache_lock);
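/*
 * Usage sketch (illustrative): printing the path of an open file with
 * the same one-page buffer pattern that sys_getcwd() uses below.
 */
static void example_print_path(struct file *file)
{
	char *page = (char *) __get_free_page(GFP_KERNEL);
	char *p;

	if (!page)
		return;
	p = d_path(file->f_dentry, file->f_vfsmnt, page, PAGE_SIZE);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "path: %s\n", p);
	free_page((unsigned long) page);
}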
1404 * NOTE! The user-level library version returns a
1405 * character pointer. The kernel system call just
1406 * returns the length of the buffer filled (which
1407 * includes the ending '\0' character), or a negative
1408 * error value. So libc would do something like
1410 * char *getcwd(char * buf, size_t size)
1414 * retval = sys_getcwd(buf, size);
1421 asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
1424 struct vfsmount *pwdmnt, *rootmnt;
1425 struct dentry *pwd, *root;
1426 char *page = (char *) __get_free_page(GFP_USER);
1431 read_lock(&current->fs->lock);
1432 pwdmnt = mntget(current->fs->pwdmnt);
1433 pwd = dget(current->fs->pwd);
1434 rootmnt = mntget(current->fs->rootmnt);
1435 root = dget(current->fs->root);
1436 read_unlock(&current->fs->lock);
1439 /* Has the current directory been unlinked? */
1440 spin_lock(&dcache_lock);
1441 if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
1445 cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
1446 spin_unlock(&dcache_lock);
1448 error = PTR_ERR(cwd);
1453 len = PAGE_SIZE + page - cwd;
1456 if (copy_to_user(buf, cwd, len))
1460 spin_unlock(&dcache_lock);
1467 free_page((unsigned long) page);
1472 * Test whether new_dentry is a subdirectory of old_dentry.
1474 * Trivially implemented using the dcache structure
1478 * is_subdir - is new dentry a subdirectory of old_dentry
1479 * @new_dentry: new dentry
1480 * @old_dentry: old dentry
1482 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
1483 * Returns 0 otherwise.
1484 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
1487 int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
1490 struct dentry * saved = new_dentry;
1494 /* need rcu_read_lock() to protect against the d_parent trashing due to
1499 /* for restarting inner loop in case of seq retry */
1501 seq = read_seqbegin(&rename_lock);
1503 if (new_dentry != old_dentry) {
1504 struct dentry * parent = new_dentry->d_parent;
1505 if (parent == new_dentry)
1507 new_dentry = parent;
1513 } while (read_seqretry(&rename_lock, seq));
1519 void d_genocide(struct dentry *root)
1521 struct dentry *this_parent = root;
1522 struct list_head *next;
1524 spin_lock(&dcache_lock);
1526 next = this_parent->d_subdirs.next;
1528 while (next != &this_parent->d_subdirs) {
1529 struct list_head *tmp = next;
1530 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1532 if (d_unhashed(dentry) || !dentry->d_inode)
1534 if (!list_empty(&dentry->d_subdirs)) {
1535 this_parent = dentry;
1538 atomic_dec(&dentry->d_count);
1540 if (this_parent != root) {
1541 next = this_parent->d_child.next;
1542 atomic_dec(&this_parent->d_count);
1543 this_parent = this_parent->d_parent;
1546 spin_unlock(&dcache_lock);
1550 * find_inode_number - check for dentry with name
1551 * @dir: directory to check
1552 * @name: Name to find.
1554 * Check whether a dentry already exists for the given name,
1555 * and return the inode number if it has an inode. Otherwise 0 is returned.
1558 * This routine is used to post-process directory listings for
1559 * filesystems using synthetic inode numbers, and is necessary
1560 * to keep getcwd() working.
1563 ino_t find_inode_number(struct dentry *dir, struct qstr *name)
1565 struct dentry * dentry;
1569 * Check for a fs-specific hash function. Note that we must
1570 * calculate the standard hash first, as the d_op->d_hash()
1571 * routine may choose to leave the hash value unchanged.
1573 name->hash = full_name_hash(name->name, name->len);
1574 if (dir->d_op && dir->d_op->d_hash)
1576 if (dir->d_op->d_hash(dir, name) != 0)
1580 dentry = d_lookup(dir, name);
1583 if (dentry->d_inode)
1584 ino = dentry->d_inode->i_ino;
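/*
 * Usage sketch (illustrative): readdir() post-processing for a
 * filesystem with synthetic inode numbers; an existing dcache entry
 * wins over the synthetic number so getcwd() stays consistent.
 */
static ino_t example_dirent_ino(struct dentry *dir, struct qstr *name,
				ino_t synthetic)
{
	ino_t ino = find_inode_number(dir, name);

	return ino ? ino : synthetic;
}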
1591 static __initdata unsigned long dhash_entries;
1592 static int __init set_dhash_entries(char *str)
1596 dhash_entries = simple_strtoul(str, &str, 0);
1599 __setup("dhash_entries=", set_dhash_entries);
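/*
 * Example: booting with "dhash_entries=65536" on the kernel command
 * line fixes the dentry hash table size instead of letting it be
 * scaled from available memory in dcache_init_early().
 */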
1601 static void __init dcache_init_early(void)
1606 alloc_large_system_hash("Dentry cache",
1607 sizeof(struct hlist_head),
1614 for (loop = 0; loop < (1 << d_hash_shift); loop++)
1615 INIT_HLIST_HEAD(&dentry_hashtable[loop]);
1618 void flush_dentry_attributes(void)
1620 struct hlist_node *tmp;
1621 struct dentry *dentry;
1624 spin_lock(&dcache_lock);
1625 for (i = 0; i <= d_hash_mask; i++)
1626 hlist_for_each_entry(dentry, tmp, dentry_hashtable+i, d_hash) {
1627 kfree(dentry->d_extra_attributes);
1628 dentry->d_extra_attributes = NULL;
1630 spin_unlock(&dcache_lock);
1633 EXPORT_SYMBOL_GPL(flush_dentry_attributes);
1635 static void __init dcache_init(unsigned long mempages)
1638 * A constructor could be added for stable state like the lists,
1639 * but it is probably not worth it because of the cache nature of the dcache.
1642 dentry_cache = kmem_cache_create("dentry_cache",
1643 sizeof(struct dentry),
1645 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
1648 set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
1651 /* SLAB cache for __getname() consumers */
1652 kmem_cache_t *names_cachep;
1654 /* SLAB cache for file structures */
1655 kmem_cache_t *filp_cachep;
1657 EXPORT_SYMBOL(d_genocide);
1659 extern void bdev_cache_init(void);
1660 extern void chrdev_init(void);
1662 void __init vfs_caches_init_early(void)
1664 dcache_init_early();
1668 void __init vfs_caches_init(unsigned long mempages)
1670 unsigned long reserve;
1672 /* Base hash sizes on available memory, with a reserve equal to
1673 150% of current kernel size */
1675 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
1676 mempages -= reserve;
1678 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
1679 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1681 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
1682 SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);
1684 dcache_init(mempages);
1685 inode_init(mempages);
1686 files_init(mempages);
1692 EXPORT_SYMBOL(d_alloc);
1693 EXPORT_SYMBOL(d_alloc_anon);
1694 EXPORT_SYMBOL(d_alloc_root);
1695 EXPORT_SYMBOL(d_delete);
1696 EXPORT_SYMBOL(d_find_alias);
1697 EXPORT_SYMBOL(d_instantiate);
1698 EXPORT_SYMBOL(d_invalidate);
1699 EXPORT_SYMBOL(d_lookup);
1700 EXPORT_SYMBOL(d_move);
1701 EXPORT_SYMBOL(d_path);
1702 EXPORT_SYMBOL(d_prune_aliases);
1703 EXPORT_SYMBOL(d_rehash);
1704 EXPORT_SYMBOL(d_splice_alias);
1705 EXPORT_SYMBOL(d_validate);
1706 EXPORT_SYMBOL(dget_locked);
1707 EXPORT_SYMBOL(dput);
1708 EXPORT_SYMBOL(find_inode_number);
1709 EXPORT_SYMBOL(have_submounts);
1710 EXPORT_SYMBOL(is_subdir);
1711 EXPORT_SYMBOL(names_cachep);
1712 EXPORT_SYMBOL(shrink_dcache_anon);
1713 EXPORT_SYMBOL(shrink_dcache_parent);
1714 EXPORT_SYMBOL(shrink_dcache_sb);