4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
10 * Notes on the allocation strategy:
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
17 #include <linux/config.h>
18 #include <linux/string.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/smp_lock.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/module.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
34 #define DCACHE_PARANOIA 1
35 /* #define DCACHE_DEBUG 1 */
37 spinlock_t dcache_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
38 seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
40 EXPORT_SYMBOL(dcache_lock);
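/*
 * Locking overview (roughly, as the code below uses these locks):
 * dcache_lock protects the hash chains, the dentry_unused LRU, the
 * d_child/d_subdirs tree linkage and the inode alias lists; the
 * per-dentry d_lock protects an individual dentry's fields; rename_lock
 * is a seqlock that lets the lockless __d_lookup() detect a concurrent
 * d_move() and retry.
 */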
42 static kmem_cache_t *dentry_cache;
44 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
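/*
 * Short names are stored inline in dentry->d_iname; only names longer
 * than DNAME_INLINE_LEN-1 get a separate kmalloc()ed buffer (see
 * d_alloc() below).  The dname_external() test used throughout this file
 * lives in <linux/dcache.h>; a minimal sketch of it, for reference only:
 */
#if 0	/* illustrative sketch, not compiled */
static inline int dname_external(struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
#endif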
47 * This is the single most critical data structure when it comes
48 * to the dcache: the hashtable for lookups. Somebody should try
49 * to make this good - I've just made it work.
51 * This hash-function tries to avoid losing too many bits of hash
52 * information, yet avoid using a prime hash-size or similar.
54 #define D_HASHBITS d_hash_shift
55 #define D_HASHMASK d_hash_mask
57 static unsigned int d_hash_mask;
58 static unsigned int d_hash_shift;
59 static struct hlist_head *dentry_hashtable;
60 static LIST_HEAD(dentry_unused);
62 static void prune_dcache(int count);
65 /* Statistics gathering. */
66 struct dentry_stat_t dentry_stat = {
70 static void d_callback(void *arg)
72 struct dentry * dentry = (struct dentry *)arg;
74 if (dname_external(dentry))
75 kfree(dentry->d_name.name);
76 kmem_cache_free(dentry_cache, dentry);
80 * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry inside dcache_lock.
83 static void d_free(struct dentry *dentry)
85 if (dentry->d_op && dentry->d_op->d_release)
86 dentry->d_op->d_release(dentry);
87 if (dentry->d_extra_attributes) {
88 kfree(dentry->d_extra_attributes);
89 dentry->d_extra_attributes = NULL;
91 call_rcu(&dentry->d_rcu, d_callback, dentry);
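/*
 * The actual freeing is deferred to d_callback() via RCU: lockless
 * lookups may still be walking the hash chain this dentry was on, so
 * the memory must not be reused until a grace period has elapsed.
 */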
95 * Release the dentry's inode, using the filesystem
96 * d_iput() operation if defined.
97 * Called with dcache_lock and per dentry lock held, drops both.
99 static inline void dentry_iput(struct dentry * dentry)
101 struct inode *inode = dentry->d_inode;
103 dentry->d_inode = NULL;
104 list_del_init(&dentry->d_alias);
105 spin_unlock(&dentry->d_lock);
106 spin_unlock(&dcache_lock);
107 if (dentry->d_op && dentry->d_op->d_iput)
108 dentry->d_op->d_iput(dentry, inode);
112 spin_unlock(&dentry->d_lock);
113 spin_unlock(&dcache_lock);
120 * This is complicated by the fact that we do not want to put
121 * dentries that are no longer on any hash chain on the unused
122 * list: we'd much rather just get rid of them immediately.
124 * However, that implies that we have to traverse the dentry
125 * tree upwards to the parents which might _also_ now be
126 * scheduled for deletion (it may have been only waiting for
127 * its last child to go away).
129 * This tail recursion is done by hand as we don't want to depend
130 * on the compiler to always get this right (gcc generally doesn't).
131 * Real recursion would eat up our stack space.
135 * dput - release a dentry
136 * @dentry: dentry to release
138 * Release a dentry. This will drop the usage count and if appropriate
139 * call the dentry unlink method as well as removing it from the queues and
140 * releasing its resources. If the parent dentries were scheduled for release
141 * they too may now get deleted.
143 * no dcache lock, please.
146 void dput(struct dentry *dentry)
152 if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
155 spin_lock(&dentry->d_lock);
156 if (atomic_read(&dentry->d_count)) {
157 spin_unlock(&dentry->d_lock);
158 spin_unlock(&dcache_lock);
163 * AV: ->d_delete() is _NOT_ allowed to block now.
165 if (dentry->d_op && dentry->d_op->d_delete) {
166 if (dentry->d_op->d_delete(dentry))
169 /* Unreachable? Get rid of it */
170 if (d_unhashed(dentry))
172 if (list_empty(&dentry->d_lru)) {
173 dentry->d_flags |= DCACHE_REFERENCED;
174 list_add(&dentry->d_lru, &dentry_unused);
175 dentry_stat.nr_unused++;
177 spin_unlock(&dentry->d_lock);
178 spin_unlock(&dcache_lock);
185 struct dentry *parent;
187 /* If dentry was on d_lru list
188 * delete it from there
190 if (!list_empty(&dentry->d_lru)) {
191 list_del(&dentry->d_lru);
192 dentry_stat.nr_unused--;
194 list_del(&dentry->d_child);
195 dentry_stat.nr_dentry--; /* For d_free, below */
196 /* drops the locks; at that point nobody can reach this dentry */
198 parent = dentry->d_parent;
200 if (dentry == parent)
208 * d_invalidate - invalidate a dentry
209 * @dentry: dentry to invalidate
211 * Try to invalidate the dentry if it turns out to be
212 * possible. If there are other dentries that can be
213 * reached through this one we can't delete it and we
214 * return -EBUSY. On success we return 0.
219 int d_invalidate(struct dentry * dentry)
222 * If it's already been dropped, return OK.
224 spin_lock(&dcache_lock);
225 if (d_unhashed(dentry)) {
226 spin_unlock(&dcache_lock);
230 * Check whether to do a partial shrink_dcache
231 * to get rid of unused child entries.
233 if (!list_empty(&dentry->d_subdirs)) {
234 spin_unlock(&dcache_lock);
235 shrink_dcache_parent(dentry);
236 spin_lock(&dcache_lock);
240 * Somebody else still using it?
242 * If it's a directory, we can't drop it
243 * for fear of somebody re-populating it
244 * with children (even though dropping it
245 * would make it unreachable from the root,
246 * we might still populate it if it was a
247 * working directory or similar).
249 spin_lock(&dentry->d_lock);
250 if (atomic_read(&dentry->d_count) > 1) {
251 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
252 spin_unlock(&dentry->d_lock);
253 spin_unlock(&dcache_lock);
259 spin_unlock(&dentry->d_lock);
260 spin_unlock(&dcache_lock);
264 /* This should be called _only_ with dcache_lock held */
266 static inline struct dentry * __dget_locked(struct dentry *dentry)
268 atomic_inc(&dentry->d_count);
269 if (!list_empty(&dentry->d_lru)) {
270 dentry_stat.nr_unused--;
271 list_del_init(&dentry->d_lru);
276 struct dentry * dget_locked(struct dentry *dentry)
278 return __dget_locked(dentry);
282 * d_find_alias - grab a hashed alias of inode
283 * @inode: inode in question
285 * If inode has a hashed alias - acquire the reference to alias and
286 * return it. Otherwise return NULL. Notice that if inode is a directory
287 * there can be only one alias and it can be unhashed only if it has no children.
290 * If the inode has a DCACHE_DISCONNECTED alias, then prefer
291 * any other hashed alias over that one.
294 struct dentry * d_find_alias(struct inode *inode)
296 struct list_head *head, *next, *tmp;
297 struct dentry *alias, *discon_alias=NULL;
299 spin_lock(&dcache_lock);
300 head = &inode->i_dentry;
301 next = inode->i_dentry.next;
302 while (next != head) {
306 alias = list_entry(tmp, struct dentry, d_alias);
307 if (!d_unhashed(alias)) {
308 if (alias->d_flags & DCACHE_DISCONNECTED)
309 discon_alias = alias;
311 __dget_locked(alias);
312 spin_unlock(&dcache_lock);
318 __dget_locked(discon_alias);
319 spin_unlock(&dcache_lock);
324 * Try to kill dentries associated with this inode.
325 * WARNING: you must own a reference to inode.
327 void d_prune_aliases(struct inode *inode)
329 struct list_head *tmp, *head = &inode->i_dentry;
331 spin_lock(&dcache_lock);
333 while ((tmp = tmp->next) != head) {
334 struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
335 if (!atomic_read(&dentry->d_count)) {
336 __dget_locked(dentry);
338 spin_unlock(&dcache_lock);
343 spin_unlock(&dcache_lock);
347 * Throw away a dentry - free the inode, dput the parent.
348 * This requires that the LRU list has already been
350 * Called with dcache_lock, drops it and then regains.
352 static inline void prune_one_dentry(struct dentry * dentry)
354 struct dentry * parent;
357 list_del(&dentry->d_child);
358 dentry_stat.nr_dentry--; /* For d_free, below */
360 parent = dentry->d_parent;
362 if (parent != dentry)
364 spin_lock(&dcache_lock);
368 * prune_dcache - shrink the dcache
369 * @count: number of entries to try and free
371 * Shrink the dcache. This is done when we need
372 * more memory, or simply when we need to unmount
373 * something (at which point we need to unuse all dentries).
376 * This function may fail to free any resources if
377 * all the dentries are in use.
380 static void prune_dcache(int count)
382 spin_lock(&dcache_lock);
383 for (; count ; count--) {
384 struct dentry *dentry;
385 struct list_head *tmp;
387 tmp = dentry_unused.prev;
388 if (tmp == &dentry_unused)
391 prefetch(dentry_unused.prev);
392 dentry_stat.nr_unused--;
393 dentry = list_entry(tmp, struct dentry, d_lru);
395 spin_lock(&dentry->d_lock);
397 * We found an inuse dentry which was not removed from
398 * dentry_unused because of laziness during lookup. Do not free
399 * it - just keep it off the dentry_unused list.
401 if (atomic_read(&dentry->d_count)) {
402 spin_unlock(&dentry->d_lock);
405 /* If the dentry was recently referenced, don't free it. */
406 if (dentry->d_flags & DCACHE_REFERENCED) {
407 dentry->d_flags &= ~DCACHE_REFERENCED;
408 list_add(&dentry->d_lru, &dentry_unused);
409 dentry_stat.nr_unused++;
410 spin_unlock(&dentry->d_lock);
413 prune_one_dentry(dentry);
415 spin_unlock(&dcache_lock);
419 * Shrink the dcache for the specified super block.
420 * This allows us to unmount a device without disturbing
421 * the dcache for the other devices.
423 * This implementation makes just two traversals of the
424 * unused list. On the first pass we move the selected
425 * dentries to the most recent end, and on the second
426 * pass we free them. The second pass must restart after
427 * each dput(), but since the target dentries are all at
428 * the end, it's really just a single traversal.
432 * shrink_dcache_sb - shrink dcache for a superblock
435 * Shrink the dcache for the specified super block. This
436 * is used to free the dcache before unmounting a file
440 void shrink_dcache_sb(struct super_block * sb)
442 struct list_head *tmp, *next;
443 struct dentry *dentry;
446 * Pass one ... move the dentries for the specified
447 * superblock to the most recent end of the unused list.
449 spin_lock(&dcache_lock);
450 next = dentry_unused.next;
451 while (next != &dentry_unused) {
454 dentry = list_entry(tmp, struct dentry, d_lru);
455 if (dentry->d_sb != sb)
458 list_add(tmp, &dentry_unused);
462 * Pass two ... free the dentries for this superblock.
465 next = dentry_unused.next;
466 while (next != &dentry_unused) {
469 dentry = list_entry(tmp, struct dentry, d_lru);
470 if (dentry->d_sb != sb)
472 dentry_stat.nr_unused--;
474 spin_lock(&dentry->d_lock);
475 if (atomic_read(&dentry->d_count)) {
476 spin_unlock(&dentry->d_lock);
479 prune_one_dentry(dentry);
482 spin_unlock(&dcache_lock);
486 * Search for at least 1 mount point in the dentry's subdirs.
487 * We descend to the next level whenever the d_subdirs
488 * list is non-empty and continue searching.
492 * have_submounts - check for mounts over a dentry
493 * @parent: dentry to check.
495 * Return true if the parent or its subdirectories contain a mount point.
499 int have_submounts(struct dentry *parent)
501 struct dentry *this_parent = parent;
502 struct list_head *next;
504 spin_lock(&dcache_lock);
505 if (d_mountpoint(parent))
508 next = this_parent->d_subdirs.next;
510 while (next != &this_parent->d_subdirs) {
511 struct list_head *tmp = next;
512 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
514 /* Have we found a mount point ? */
515 if (d_mountpoint(dentry))
517 if (!list_empty(&dentry->d_subdirs)) {
518 this_parent = dentry;
523 * All done at this level ... ascend and resume the search.
525 if (this_parent != parent) {
526 next = this_parent->d_child.next;
527 this_parent = this_parent->d_parent;
530 spin_unlock(&dcache_lock);
531 return 0; /* No mount points found in tree */
533 spin_unlock(&dcache_lock);
538 * Search the dentry child list for the specified parent,
539 * and move any unused dentries to the end of the unused
540 * list for prune_dcache(). We descend to the next level
541 * whenever the d_subdirs list is non-empty and continue searching.
544 static int select_parent(struct dentry * parent)
546 struct dentry *this_parent = parent;
547 struct list_head *next;
550 spin_lock(&dcache_lock);
552 next = this_parent->d_subdirs.next;
554 while (next != &this_parent->d_subdirs) {
555 struct list_head *tmp = next;
556 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
559 if (!list_empty(&dentry->d_lru)) {
560 dentry_stat.nr_unused--;
561 list_del_init(&dentry->d_lru);
564 * move only zero ref count dentries to the end
565 * of the unused list for prune_dcache
567 if (!atomic_read(&dentry->d_count)) {
568 list_add(&dentry->d_lru, dentry_unused.prev);
569 dentry_stat.nr_unused++;
573 * Descend a level if the d_subdirs list is non-empty.
575 if (!list_empty(&dentry->d_subdirs)) {
576 this_parent = dentry;
578 printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
579 dentry->d_parent->d_name.name, dentry->d_name.name, found);
585 * All done at this level ... ascend and resume the search.
587 if (this_parent != parent) {
588 next = this_parent->d_child.next;
589 this_parent = this_parent->d_parent;
591 printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
592 this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
596 spin_unlock(&dcache_lock);
601 * shrink_dcache_parent - prune dcache
602 * @parent: parent of entries to prune
604 * Prune the dcache to remove unused children of the parent dentry.
607 void shrink_dcache_parent(struct dentry * parent)
611 while ((found = select_parent(parent)) != 0)
616 * shrink_dcache_anon - further prune the cache
617 * @head: head of d_hash list of dentries to prune
619 * Prune the dentries that are anonymous
621 * Parsing the d_hash list does not require read_barrier_depends() as it
622 * is done under dcache_lock.
625 void shrink_dcache_anon(struct hlist_head *head)
627 struct hlist_node *lp;
631 spin_lock(&dcache_lock);
632 hlist_for_each(lp, head) {
633 struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
634 if (!list_empty(&this->d_lru)) {
635 dentry_stat.nr_unused--;
636 list_del(&this->d_lru);
640 * move only zero ref count dentries to the end
641 * of the unused list for prune_dcache
643 if (!atomic_read(&this->d_count)) {
644 list_add_tail(&this->d_lru, &dentry_unused);
645 dentry_stat.nr_unused++;
649 spin_unlock(&dcache_lock);
655 * Scan `nr' dentries and return the number which remain.
657 * We need to avoid reentering the filesystem if the caller is performing a
658 * GFP_NOFS allocation attempt. One example deadlock is:
660 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
661 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
662 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
664 * In this case we return -1 to tell the caller that we bailed.
666 static int shrink_dcache_memory(int nr, unsigned int gfp_mask)
669 if (!(gfp_mask & __GFP_FS))
673 return dentry_stat.nr_unused;
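/*
 * shrink_dcache_memory() is hooked into the VM below via
 * set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory) in dcache_init(),
 * so memory pressure translates into prune_dcache() calls.
 */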
677 * d_alloc - allocate a dcache entry
678 * @parent: parent of entry to allocate
679 * @name: qstr of the name
681 * Allocates a dentry. It returns %NULL if there is insufficient memory
682 * available. On a success the dentry is returned. The name passed in is
683 * copied and the copy passed in may be reused after this call.
686 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
688 struct dentry *dentry;
691 #define DENTRY_UNUSED_THRESHOLD 30000
692 #define DENTRY_BATCH_COUNT 32
694 if (dentry_stat.nr_unused > DENTRY_UNUSED_THRESHOLD) {
696 spin_lock(&dcache_lock);
697 if (dentry_stat.nr_unused < DENTRY_UNUSED_THRESHOLD)
699 spin_unlock(&dcache_lock);
701 prune_dcache(DENTRY_BATCH_COUNT);
704 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
708 if (name->len > DNAME_INLINE_LEN-1) {
709 dname = kmalloc(name->len + 1, GFP_KERNEL);
711 kmem_cache_free(dentry_cache, dentry);
715 dname = dentry->d_iname;
717 dentry->d_name.name = dname;
719 dentry->d_name.len = name->len;
720 dentry->d_name.hash = name->hash;
721 memcpy(dname, name->name, name->len);
722 dname[name->len] = 0;
724 atomic_set(&dentry->d_count, 1);
725 dentry->d_flags = DCACHE_UNHASHED;
726 dentry->d_lock = SPIN_LOCK_UNLOCKED;
727 dentry->d_inode = NULL;
728 dentry->d_parent = NULL;
731 dentry->d_fsdata = NULL;
732 dentry->d_extra_attributes = NULL;
733 dentry->d_mounted = 0;
734 dentry->d_cookie = NULL;
735 dentry->d_bucket = NULL;
736 INIT_HLIST_NODE(&dentry->d_hash);
737 INIT_LIST_HEAD(&dentry->d_lru);
738 INIT_LIST_HEAD(&dentry->d_subdirs);
739 INIT_LIST_HEAD(&dentry->d_alias);
742 dentry->d_parent = dget(parent);
743 dentry->d_sb = parent->d_sb;
745 INIT_LIST_HEAD(&dentry->d_child);
748 spin_lock(&dcache_lock);
750 list_add(&dentry->d_child, &parent->d_subdirs);
751 dentry_stat.nr_dentry++;
752 spin_unlock(&dcache_lock);
758 * d_instantiate - fill in inode information for a dentry
759 * @entry: dentry to complete
760 * @inode: inode to attach to this dentry
762 * Fill in inode information in the entry.
764 * This turns negative dentries into productive full members of society.
767 * NOTE! This assumes that the inode count has been incremented
768 * (or otherwise set) by the caller to indicate that it is now
769 * in use by the dcache.
772 void d_instantiate(struct dentry *entry, struct inode * inode)
774 BUG_ON(!list_empty(&entry->d_alias));
775 spin_lock(&dcache_lock);
777 list_add(&entry->d_alias, &inode->i_dentry);
778 entry->d_inode = inode;
779 spin_unlock(&dcache_lock);
780 security_d_instantiate(entry, inode);
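/*
 * Typical use in a filesystem's ->create() or ->mkdir() method (an
 * illustrative sketch only; fs_new_inode() is a made-up helper standing
 * in for the filesystem's own inode allocation).  As the NOTE above
 * says, the inode reference is handed over to the dcache:
 *
 *	inode = fs_new_inode(dir, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_instantiate(dentry, inode);	/- dentry is now positive -/
 */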
784 * d_alloc_root - allocate root dentry
785 * @root_inode: inode to allocate the root for
787 * Allocate a root ("/") dentry for the inode given. The inode is
788 * instantiated and returned. %NULL is returned if there is insufficient
789 * memory or the inode passed is %NULL.
792 struct dentry * d_alloc_root(struct inode * root_inode)
794 struct dentry *res = NULL;
797 static const struct qstr name = { .name = "/", .len = 1 };
799 res = d_alloc(NULL, &name);
801 res->d_sb = root_inode->i_sb;
803 d_instantiate(res, root_inode);
809 static inline struct hlist_head *d_hash(struct dentry *parent,
812 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
813 hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
814 return dentry_hashtable + (hash & D_HASHMASK);
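/*
 * A caller first hashes the name itself (full_name_hash() over the bytes
 * of the component) and then mixes in the parent with d_hash() to pick a
 * chain, roughly:
 *
 *	hash = full_name_hash(name, len);
 *	head = d_hash(parent, hash);
 *	hlist_for_each(node, head) { ... compare parent and name ... }
 *
 * (see __d_lookup() and find_inode_number() below for the real users)
 */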
818 * d_alloc_anon - allocate an anonymous dentry
819 * @inode: inode to allocate the dentry for
821 * This is similar to d_alloc_root. It is used by filesystems when
822 * creating a dentry for a given inode, often in the process of
823 * mapping a filehandle to a dentry. The returned dentry may be
824 * anonymous, or may have a full name (if the inode was already
825 * in the cache). The file system may need to make further
826 * efforts to connect this dentry into the dcache properly.
828 * When called on a directory inode, we must ensure that
829 * the inode only ever has one dentry. If a dentry is
830 * found, that is returned instead of allocating a new one.
832 * On successful return, the reference to the inode has been transferred
833 * to the dentry. If %NULL is returned (indicating kmalloc failure),
834 * the reference on the inode has not been released.
837 struct dentry * d_alloc_anon(struct inode *inode)
839 static const struct qstr anonstring = { .name = "" };
843 if ((res = d_find_alias(inode))) {
848 tmp = d_alloc(NULL, &anonstring);
852 tmp->d_parent = tmp; /* make sure dput doesn't croak */
854 spin_lock(&dcache_lock);
855 if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
856 /* A directory can only have one dentry.
857 * This (now) has one, so use it.
859 res = list_entry(inode->i_dentry.next, struct dentry, d_alias);
862 /* attach a disconnected dentry */
866 spin_lock(&res->d_lock);
867 res->d_sb = inode->i_sb;
869 res->d_inode = inode;
872 * Set d_bucket to an "impossible" bucket address so
873 * that d_move() doesn't get a false positive
875 res->d_bucket = NULL;
876 res->d_flags |= DCACHE_DISCONNECTED;
877 res->d_flags &= ~DCACHE_UNHASHED;
878 list_add(&res->d_alias, &inode->i_dentry);
879 hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
880 spin_unlock(&res->d_lock);
882 inode = NULL; /* don't drop reference */
884 spin_unlock(&dcache_lock);
895 * d_splice_alias - splice a disconnected dentry into the tree if one exists
896 * @inode: the inode which may have a disconnected dentry
897 * @dentry: a negative dentry which we want to point to the inode.
899 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
900 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
901 * and return it, else simply d_add the inode to the dentry and return NULL.
903 * This is (will be) needed in the lookup routine of any filesystem that is exportable
904 * (via knfsd) so that we can build dcache paths to directories effectively.
906 * If a dentry was found and moved, then it is returned. Otherwise NULL
907 * is returned. This matches the expected return value of ->lookup.
910 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
912 struct dentry *new = NULL;
914 if (inode && S_ISDIR(inode->i_mode)) {
915 spin_lock(&dcache_lock);
916 if (!list_empty(&inode->i_dentry)) {
917 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
919 spin_unlock(&dcache_lock);
920 security_d_instantiate(new, inode);
925 /* d_instantiate takes dcache_lock, so we do it by hand */
926 list_add(&dentry->d_alias, &inode->i_dentry);
927 dentry->d_inode = inode;
928 spin_unlock(&dcache_lock);
929 security_d_instantiate(dentry, inode);
933 d_add(dentry, inode);
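/*
 * A typical exportable filesystem calls this from its ->lookup() method,
 * along the lines of (sketch only; fs_lookup_inode() is a hypothetical
 * stand-in for the filesystem's own directory search):
 *
 *	inode = fs_lookup_inode(dir, &dentry->d_name);
 *	return d_splice_alias(inode, dentry);
 */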
939 * d_lookup - search for a dentry
940 * @parent: parent dentry
941 * @name: qstr of name we wish to find
943 * Searches the children of the parent dentry for the name in question. If
944 * the dentry is found its reference count is incremented and the dentry
945 * is returned. The caller must use dput() to free the entry when it has
946 * finished using it. %NULL is returned on failure.
948 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
949 * Memory barriers are used while updating and doing lockless traversal.
950 * To avoid races with d_move while rename is happening, d_lock is used.
952 * Overflows in memcmp() while d_move() is in progress are avoided by keeping
953 * the length and name pointer in one structure pointed to by d_qstr.
955 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
956 * lookup is going on.
958 * dentry_unused list is not updated even if lookup finds the required dentry
959 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
960 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock acquisition.
963 * d_lookup() is protected against the concurrent renames in some unrelated
964 * directory using the seqlock_t rename_lock.
967 struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
969 struct dentry * dentry = NULL;
973 seq = read_seqbegin(&rename_lock);
974 dentry = __d_lookup(parent, name);
977 } while (read_seqretry(&rename_lock, seq));
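/*
 * Callers pair the reference returned by d_lookup() with dput(), e.g.
 * (sketch):
 *
 *	dentry = d_lookup(parent, &name);
 *	if (dentry) {
 *		... use dentry ...
 *		dput(dentry);
 *	}
 */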
981 struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
983 unsigned int len = name->len;
984 unsigned int hash = name->hash;
985 const unsigned char *str = name->name;
986 struct hlist_head *head = d_hash(parent,hash);
987 struct dentry *found = NULL;
988 struct hlist_node *node;
992 hlist_for_each (node, head) {
993 struct dentry *dentry;
996 smp_read_barrier_depends();
997 dentry = hlist_entry(node, struct dentry, d_hash);
1001 if (dentry->d_name.hash != hash)
1003 if (dentry->d_parent != parent)
1006 spin_lock(&dentry->d_lock);
1009 * If lookup ends up in a different bucket due to concurrent
1012 if (unlikely(dentry->d_bucket != head))
1016 * Recheck the dentry after taking the lock - d_move may have
1017 * changed things. Don't bother checking the hash because we're
1018 * about to compare the whole name anyway.
1020 if (dentry->d_parent != parent)
1023 qstr = &dentry->d_name;
1024 smp_read_barrier_depends();
1025 if (parent->d_op && parent->d_op->d_compare) {
1026 if (parent->d_op->d_compare(parent, qstr, name))
1029 if (qstr->len != len)
1031 if (memcmp(qstr->name, str, len))
1035 if (!d_unhashed(dentry)) {
1036 atomic_inc(&dentry->d_count);
1040 spin_unlock(&dentry->d_lock);
1043 spin_unlock(&dentry->d_lock);
1051 * d_validate - verify dentry provided from insecure source
1052 * @dentry: The dentry alleged to be valid child of @dparent
1053 * @dparent: The parent dentry (known to be valid)
1054 * @hash: Hash of the dentry
1055 * @len: Length of the name
1057 * An insecure source has sent us a dentry, here we verify it and dget() it.
1058 * This is used by ncpfs in its readdir implementation.
1059 * Zero is returned if the dentry is invalid.
1062 int d_validate(struct dentry *dentry, struct dentry *dparent)
1064 struct hlist_head *base;
1065 struct hlist_node *lhp;
1067 /* Check whether the ptr might be valid at all.. */
1068 if (!kmem_ptr_validate(dentry_cache, dentry))
1071 if (dentry->d_parent != dparent)
1074 spin_lock(&dcache_lock);
1075 base = d_hash(dparent, dentry->d_name.hash);
1076 hlist_for_each(lhp,base) {
1077 /* read_barrier_depends() not required for d_hash list
1078 * as it is parsed under dcache_lock
1080 if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
1081 __dget_locked(dentry);
1082 spin_unlock(&dcache_lock);
1086 spin_unlock(&dcache_lock);
1092 * When a file is deleted, we have two options:
1093 * - turn this dentry into a negative dentry
1094 * - unhash this dentry and free it.
1096 * Usually, we want to just turn this into
1097 * a negative dentry, but if anybody else is
1098 * currently using the dentry or the inode
1099 * we can't do that and we fall back on removing
1100 * it from the hash queues and waiting for
1101 * it to be deleted later when it has no users
1105 * d_delete - delete a dentry
1106 * @dentry: The dentry to delete
1108 * Turn the dentry into a negative dentry if possible, otherwise
1109 * remove it from the hash queues so it can be deleted later
1112 void d_delete(struct dentry * dentry)
1115 * Are we the only user?
1117 spin_lock(&dcache_lock);
1118 spin_lock(&dentry->d_lock);
1119 if (atomic_read(&dentry->d_count) == 1) {
1120 dentry_iput(dentry);
1124 if (!d_unhashed(dentry))
1127 spin_unlock(&dentry->d_lock);
1128 spin_unlock(&dcache_lock);
1132 * d_rehash - add an entry back to the hash
1133 * @entry: dentry to add to the hash
1135 * Adds a dentry to the hash according to its name.
1138 void d_rehash(struct dentry * entry)
1140 struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
1142 spin_lock(&dcache_lock);
1143 spin_lock(&entry->d_lock);
1144 entry->d_flags &= ~DCACHE_UNHASHED;
1145 spin_unlock(&entry->d_lock);
1146 entry->d_bucket = list;
1147 hlist_add_head_rcu(&entry->d_hash, list);
1148 spin_unlock(&dcache_lock);
1151 #define do_switch(x,y) do { \
1152 __typeof__ (x) __tmp = x; \
1153 x = y; y = __tmp; } while (0)
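/*
 * do_switch() simply swaps two lvalues of the same type: after
 *	do_switch(a, b);
 * a holds b's old value and vice versa.  Each argument is evaluated more
 * than once, so arguments with side effects must not be passed.
 */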
1156 * When switching names, the actual string doesn't strictly have to
1157 * be preserved in the target - because we're dropping the target
1158 * anyway. As such, we can just do a simple memcpy() to copy over
1159 * the new name before we switch.
1161 * Note that we have to be a lot more careful about getting the hash
1162 * switched - we have to switch the hash value properly even if it
1163 * then no longer matches the actual (corrupted) string of the target.
1164 * The hash value has to match the hash queue that the dentry is on..
1166 static void switch_names(struct dentry *dentry, struct dentry *target)
1168 if (dname_external(target)) {
1169 if (dname_external(dentry)) {
1171 * Both external: swap the pointers
1173 do_switch(target->d_name.name, dentry->d_name.name);
1176 * dentry:internal, target:external. Steal target's
1177 * storage and make target internal.
1179 dentry->d_name.name = target->d_name.name;
1180 target->d_name.name = target->d_iname;
1183 if (dname_external(dentry)) {
1185 * dentry:external, target:internal. Give dentry's
1186 * storage to target and make dentry internal
1188 memcpy(dentry->d_iname, target->d_name.name,
1189 target->d_name.len + 1);
1190 target->d_name.name = dentry->d_name.name;
1191 dentry->d_name.name = dentry->d_iname;
1194 * Both are internal. Just copy target to dentry
1196 memcpy(dentry->d_iname, target->d_name.name,
1197 target->d_name.len + 1);
1203 * We cannibalize "target" when moving dentry on top of it,
1204 * because it's going to be thrown away anyway. We could be more
1205 * polite about it, though.
1207 * This forceful removal will result in ugly /proc output if
1208 * somebody holds a file open that got deleted due to a rename.
1209 * We could be nicer about the deleted file, and let it show
1210 * up under the name it got deleted rather than the name that deleted it.
1215 * d_move - move a dentry
1216 * @dentry: entry to move
1217 * @target: new dentry
1219 * Update the dcache to reflect the move of a file name. Negative
1220 * dcache entries should not be moved in this way.
1223 void d_move(struct dentry * dentry, struct dentry * target)
1225 if (!dentry->d_inode)
1226 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
1228 spin_lock(&dcache_lock);
1229 write_seqlock(&rename_lock);
1231 * XXXX: do we really need to take target->d_lock?
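 * (the two d_locks are taken in pointer order below; taking nested locks
 *  in a fixed address order is the usual way to rule out ABBA deadlocks)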
1233 if (target < dentry) {
1234 spin_lock(&target->d_lock);
1235 spin_lock(&dentry->d_lock);
1237 spin_lock(&dentry->d_lock);
1238 spin_lock(&target->d_lock);
1241 /* Move the dentry to the target hash queue, if on different bucket */
1242 if (dentry->d_flags & DCACHE_UNHASHED)
1243 goto already_unhashed;
1244 if (dentry->d_bucket != target->d_bucket) {
1245 hlist_del_rcu(&dentry->d_hash);
1247 dentry->d_bucket = target->d_bucket;
1248 hlist_add_head_rcu(&dentry->d_hash, target->d_bucket);
1249 dentry->d_flags &= ~DCACHE_UNHASHED;
1252 /* Unhash the target: dput() will then get rid of it */
1255 /* flush any possible attributes */
1256 if (dentry->d_extra_attributes) {
1257 kfree(dentry->d_extra_attributes);
1258 dentry->d_extra_attributes = NULL;
1260 if (target->d_extra_attributes) {
1261 kfree(target->d_extra_attributes);
1262 target->d_extra_attributes = NULL;
1265 list_del(&dentry->d_child);
1266 list_del(&target->d_child);
1268 /* Switch the names.. */
1269 switch_names(dentry, target);
1271 do_switch(dentry->d_name.len, target->d_name.len);
1272 do_switch(dentry->d_name.hash, target->d_name.hash);
1274 /* ... and switch the parents */
1275 if (IS_ROOT(dentry)) {
1276 dentry->d_parent = target->d_parent;
1277 target->d_parent = target;
1278 INIT_LIST_HEAD(&target->d_child);
1280 do_switch(dentry->d_parent, target->d_parent);
1282 /* And add them back to the (new) parent lists */
1283 list_add(&target->d_child, &target->d_parent->d_subdirs);
1286 list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
1287 spin_unlock(&target->d_lock);
1288 spin_unlock(&dentry->d_lock);
1289 write_sequnlock(&rename_lock);
1290 spin_unlock(&dcache_lock);
1294 * d_path - return the path of a dentry
1295 * @dentry: dentry to report
1296 * @vfsmnt: vfsmnt to which the dentry belongs
1297 * @root: root dentry
1298 * @rootmnt: vfsmnt to which the root dentry belongs
1299 * @buffer: buffer to return value in
1300 * @buflen: buffer length
1302 * Convert a dentry into an ASCII path name. If the entry has been deleted
1303 * the string " (deleted)" is appended. Note that this is ambiguous.
1305 * Returns the buffer or an error code if the path was too long.
1307 * "buflen" should be positive. Caller holds the dcache_lock.
1309 char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
1310 struct dentry *root, struct vfsmount *rootmnt,
1311 char *buffer, int buflen)
1313 char * end = buffer+buflen;
1319 if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
1324 memcpy(end, " (deleted)", 10);
1334 struct dentry * parent;
1336 if (dentry == root && vfsmnt == rootmnt)
1338 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
1340 spin_lock(&vfsmount_lock);
1341 if (vfsmnt->mnt_parent == vfsmnt) {
1342 spin_unlock(&vfsmount_lock);
1345 dentry = vfsmnt->mnt_mountpoint;
1346 vfsmnt = vfsmnt->mnt_parent;
1347 spin_unlock(&vfsmount_lock);
1350 parent = dentry->d_parent;
1352 namelen = dentry->d_name.len;
1353 buflen -= namelen + 1;
1357 memcpy(end, dentry->d_name.name, namelen);
1366 namelen = dentry->d_name.len;
1370 retval -= namelen-1; /* hit the slash */
1371 memcpy(retval, dentry->d_name.name, namelen);
1374 return ERR_PTR(-ENAMETOOLONG);
1377 EXPORT_SYMBOL_GPL(__d_path);
1379 /* write full pathname into buffer and return start of pathname */
1380 char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
1381 char *buf, int buflen)
1384 struct vfsmount *rootmnt;
1385 struct dentry *root;
1387 read_lock(&current->fs->lock);
1388 rootmnt = mntget(current->fs->rootmnt);
1389 root = dget(current->fs->root);
1390 read_unlock(&current->fs->lock);
1391 spin_lock(&dcache_lock);
1392 res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
1393 spin_unlock(&dcache_lock);
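/*
 * Typical d_path() usage is to print from a temporary page, much as
 * sys_getcwd() below does (sketch, error handling elided):
 *
 *	char *page = (char *)__get_free_page(GFP_USER);
 *	char *p = d_path(dentry, mnt, page, PAGE_SIZE);
 *	if (!IS_ERR(p))
 *		printk("%s\n", p);
 *	free_page((unsigned long)page);
 */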
1400 * NOTE! The user-level library version returns a
1401 * character pointer. The kernel system call just
1402 * returns the length of the buffer filled (which
1403 * includes the ending '\0' character), or a negative
1404 * error value. So libc would do something like
1406 * char *getcwd(char * buf, size_t size)
1410 * retval = sys_getcwd(buf, size);
1417 asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
1420 struct vfsmount *pwdmnt, *rootmnt;
1421 struct dentry *pwd, *root;
1422 char *page = (char *) __get_free_page(GFP_USER);
1427 read_lock(&current->fs->lock);
1428 pwdmnt = mntget(current->fs->pwdmnt);
1429 pwd = dget(current->fs->pwd);
1430 rootmnt = mntget(current->fs->rootmnt);
1431 root = dget(current->fs->root);
1432 read_unlock(&current->fs->lock);
1435 /* Has the current directory been unlinked? */
1436 spin_lock(&dcache_lock);
1437 if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
1441 cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
1442 spin_unlock(&dcache_lock);
1444 error = PTR_ERR(cwd);
1449 len = PAGE_SIZE + page - cwd;
1452 if (copy_to_user(buf, cwd, len))
1456 spin_unlock(&dcache_lock);
1463 free_page((unsigned long) page);
1468 * Test whether new_dentry is a subdirectory of old_dentry.
1470 * Trivially implemented using the dcache structure
1474 * is_subdir - is new dentry a subdirectory of old_dentry
1475 * @new_dentry: new dentry
1476 * @old_dentry: old dentry
1478 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
1479 * Returns 0 otherwise.
1480 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
1483 int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
1486 struct dentry * saved = new_dentry;
1490 /* need rcu_read_lock() to protect against the d_parent trashing due to d_move
1495 /* for restarting inner loop in case of seq retry */
1497 seq = read_seqbegin(&rename_lock);
1499 if (new_dentry != old_dentry) {
1500 struct dentry * parent = new_dentry->d_parent;
1501 if (parent == new_dentry)
1503 new_dentry = parent;
1509 } while (read_seqretry(&rename_lock, seq));
1515 void d_genocide(struct dentry *root)
1517 struct dentry *this_parent = root;
1518 struct list_head *next;
1520 spin_lock(&dcache_lock);
1522 next = this_parent->d_subdirs.next;
1524 while (next != &this_parent->d_subdirs) {
1525 struct list_head *tmp = next;
1526 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1528 if (d_unhashed(dentry)||!dentry->d_inode)
1530 if (!list_empty(&dentry->d_subdirs)) {
1531 this_parent = dentry;
1534 atomic_dec(&dentry->d_count);
1536 if (this_parent != root) {
1537 next = this_parent->d_child.next;
1538 atomic_dec(&this_parent->d_count);
1539 this_parent = this_parent->d_parent;
1542 spin_unlock(&dcache_lock);
1546 * find_inode_number - check for dentry with name
1547 * @dir: directory to check
1548 * @name: Name to find.
1550 * Check whether a dentry already exists for the given name,
1551 * and return the inode number if it has an inode. Otherwise 0 is returned.
1554 * This routine is used to post-process directory listings for
1555 * filesystems using synthetic inode numbers, and is necessary
1556 * to keep getcwd() working.
1559 ino_t find_inode_number(struct dentry *dir, struct qstr *name)
1561 struct dentry * dentry;
1565 * Check for a fs-specific hash function. Note that we must
1566 * calculate the standard hash first, as the d_op->d_hash()
1567 * routine may choose to leave the hash value unchanged.
1569 name->hash = full_name_hash(name->name, name->len);
1570 if (dir->d_op && dir->d_op->d_hash)
1572 if (dir->d_op->d_hash(dir, name) != 0)
1576 dentry = d_lookup(dir, name);
1579 if (dentry->d_inode)
1580 ino = dentry->d_inode->i_ino;
1587 static __initdata unsigned long dhash_entries;
1588 static int __init set_dhash_entries(char *str)
1592 dhash_entries = simple_strtoul(str, &str, 0);
1595 __setup("dhash_entries=", set_dhash_entries);
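/*
 * Example: booting with "dhash_entries=65536" requests roughly 64k hash
 * buckets; dcache_init() below rounds the request up to whole pages and
 * then recomputes the actual bucket count (and d_hash_shift/d_hash_mask)
 * from the allocation order it managed to get.
 */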
1597 void flush_dentry_attributes (void)
1599 struct hlist_node *tmp;
1600 struct dentry *dentry;
1603 spin_lock(&dcache_lock);
1604 for (i = 0; i <= d_hash_mask; i++)
1605 hlist_for_each_entry(dentry, tmp, dentry_hashtable+i, d_hash) {
1606 kfree(dentry->d_extra_attributes);
1607 dentry->d_extra_attributes = NULL;
1609 spin_unlock(&dcache_lock);
1612 EXPORT_SYMBOL_GPL(flush_dentry_attributes);
1614 static void __init dcache_init(unsigned long mempages)
1616 struct hlist_head *d;
1617 unsigned long order;
1618 unsigned int nr_hash;
1622 * A constructor could be added for stable state like the lists,
1623 * but it is probably not worth it because of the cache nature of the dcache.
1626 dentry_cache = kmem_cache_create("dentry_cache",
1627 sizeof(struct dentry),
1629 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
1632 set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
1635 dhash_entries = PAGE_SHIFT < 13 ?
1636 mempages >> (13 - PAGE_SHIFT) :
1637 mempages << (PAGE_SHIFT - 13);
1639 dhash_entries *= sizeof(struct hlist_head);
1640 for (order = 0; ((1UL << order) << PAGE_SHIFT) < dhash_entries; order++)
1649 nr_hash = (1UL << order) * PAGE_SIZE /
1650 sizeof(struct hlist_head);
1651 d_hash_mask = (nr_hash - 1);
1655 while ((tmp >>= 1UL) != 0UL)
1658 dentry_hashtable = (struct hlist_head *)
1659 __get_free_pages(GFP_ATOMIC, order);
1660 } while (dentry_hashtable == NULL && --order >= 0);
1662 printk(KERN_INFO "Dentry cache hash table entries: %d (order: %ld, %ld bytes)\n",
1663 nr_hash, order, (PAGE_SIZE << order));
1665 if (!dentry_hashtable)
1666 panic("Failed to allocate dcache hash table\n");
1668 d = dentry_hashtable;
1677 /* SLAB cache for __getname() consumers */
1678 kmem_cache_t *names_cachep;
1680 /* SLAB cache for file structures */
1681 kmem_cache_t *filp_cachep;
1683 EXPORT_SYMBOL(d_genocide);
1685 extern void bdev_cache_init(void);
1686 extern void chrdev_init(void);
1688 void __init vfs_caches_init(unsigned long mempages)
1690 unsigned long reserve;
1692 /* Base hash sizes on available memory, with a reserve equal to
1693 150% of current kernel size */
1695 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
1696 mempages -= reserve;
1698 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
1699 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1701 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
1702 SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);
1704 dcache_init(mempages);
1705 inode_init(mempages);
1706 files_init(mempages);
1712 EXPORT_SYMBOL(d_alloc);
1713 EXPORT_SYMBOL(d_alloc_anon);
1714 EXPORT_SYMBOL(d_alloc_root);
1715 EXPORT_SYMBOL(d_delete);
1716 EXPORT_SYMBOL(d_find_alias);
1717 EXPORT_SYMBOL(d_instantiate);
1718 EXPORT_SYMBOL(d_invalidate);
1719 EXPORT_SYMBOL(d_lookup);
1720 EXPORT_SYMBOL(d_move);
1721 EXPORT_SYMBOL(d_path);
1722 EXPORT_SYMBOL(d_prune_aliases);
1723 EXPORT_SYMBOL(d_rehash);
1724 EXPORT_SYMBOL(d_splice_alias);
1725 EXPORT_SYMBOL(d_validate);
1726 EXPORT_SYMBOL(dget_locked);
1727 EXPORT_SYMBOL(dput);
1728 EXPORT_SYMBOL(find_inode_number);
1729 EXPORT_SYMBOL(have_submounts);
1730 EXPORT_SYMBOL(is_subdir);
1731 EXPORT_SYMBOL(names_cachep);
1732 EXPORT_SYMBOL(shrink_dcache_anon);
1733 EXPORT_SYMBOL(shrink_dcache_parent);
1734 EXPORT_SYMBOL(shrink_dcache_sb);