/*
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 *
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#define DCACHE_PARANOIA 1
/* #define DCACHE_DEBUG 1 */

spinlock_t dcache_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(dcache_lock);

static kmem_cache_t *dentry_cache;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS     d_hash_shift
#define D_HASHMASK     d_hash_mask

static unsigned int d_hash_mask;
static unsigned int d_hash_shift;
static struct hlist_head *dentry_hashtable;
static LIST_HEAD(dentry_unused);

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};
static void d_callback(void *arg)
{
	struct dentry * dentry = (struct dentry *)arg;

	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}
/*
 * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	if (dentry->d_extra_attributes) {
		kfree(dentry->d_extra_attributes);
		dentry->d_extra_attributes = NULL;
	}
	call_rcu(&dentry->d_rcu, d_callback, dentry);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 * Called with dcache_lock and per dentry lock held, drops both.
 */
static inline void dentry_iput(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}
/*
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);

kill_it: {
		struct dentry *parent;

		/* If dentry was on d_lru list
		 * delete it from there
		 */
		if (!list_empty(&dentry->d_lru)) {
			list_del(&dentry->d_lru);
			dentry_stat.nr_unused--;
		}
		list_del(&dentry->d_child);
		dentry_stat.nr_dentry--;	/* For d_free, below */
		/* drops the locks, at that point nobody can reach this dentry */
		dentry_iput(dentry);
		parent = dentry->d_parent;
		d_free(dentry);
		if (dentry == parent)
			return;
		dentry = parent;
		goto repeat;
	}
}
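/*
 * Illustrative sketch (not part of the original file): the usual
 * dget()/dput() pairing as a caller might use it. The helper name
 * example_use_dentry() is hypothetical.
 */
#if 0
static void example_use_dentry(struct dentry *dentry)
{
	dget(dentry);		/* pin the dentry while we use it */
	/* ... safe to dereference dentry->d_inode, dentry->d_name ... */
	dput(dentry);		/* may park it on dentry_unused or free it */
}
#endif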
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}
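/*
 * Illustrative sketch (assumption, not from the original source): how a
 * lookup path might drop a dentry that failed revalidation, falling back
 * to keeping it if children or other users prevent invalidation.
 * The helper name example_drop_if_stale() is hypothetical.
 */
#if 0
static struct dentry *example_drop_if_stale(struct dentry *dentry)
{
	if (d_invalidate(dentry) == 0) {
		/* unhashed (or already gone): release our reference */
		dput(dentry);
		return NULL;	/* caller should look the name up again */
	}
	return dentry;		/* -EBUSY: still reachable, keep using it */
}
#endif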
/* This should be called _only_ with dcache_lock held */

static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	if (!list_empty(&dentry->d_lru)) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias - acquire the reference to alias and
 * return it. Otherwise return NULL. Notice that if inode is a directory
 * there can be only one alias and it can be unhashed only if it has
 * no children.
 *
 * If the inode has a DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
struct dentry * d_find_alias(struct inode *inode)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias=NULL;

	spin_lock(&dcache_lock);
	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		alias = list_entry(tmp, struct dentry, d_alias);
		if (!d_unhashed(alias)) {
			if (alias->d_flags & DCACHE_DISCONNECTED)
				discon_alias = alias;
			else {
				__dget_locked(alias);
				spin_unlock(&dcache_lock);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	spin_unlock(&dcache_lock);
	return discon_alias;
}
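/*
 * Illustrative sketch (assumption): export-code style use of
 * d_find_alias() - prefer an existing alias of the inode and remember
 * to dput() it. example_peek_alias() is a hypothetical name.
 */
#if 0
static void example_peek_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	if (alias) {
		/* alias is hashed (or DCACHE_DISCONNECTED as a fallback) */
		printk(KERN_DEBUG "alias: %s\n", alias->d_name.name);
		dput(alias);
	}
}
#endif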
/*
 *	Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct list_head *tmp, *head = &inode->i_dentry;
restart:
	spin_lock(&dcache_lock);
	tmp = head;
	while ((tmp = tmp->next) != head) {
		struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
	}
	spin_unlock(&dcache_lock);
}
/*
 * Throw away a dentry - free the inode, dput the parent.
 * This requires that the LRU list has already been
 * removed.
 * Called with dcache_lock, drops it and then regains.
 */
static inline void prune_one_dentry(struct dentry * dentry)
{
	struct dentry * parent;

	__d_drop(dentry);
	list_del(&dentry->d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	if (parent != dentry)
		dput(parent);
	spin_lock(&dcache_lock);
}
/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */
static void prune_dcache(int count)
{
	spin_lock(&dcache_lock);
	for (; count ; count--) {
		struct dentry *dentry;
		struct list_head *tmp;

		tmp = dentry_unused.prev;
		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		prefetch(dentry_unused.prev);
		dentry_stat.nr_unused--;
		dentry = list_entry(tmp, struct dentry, d_lru);

		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * dentry_unused because of laziness during lookup. Do not free
		 * it - just keep it off the dentry_unused list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
	}
	spin_unlock(&dcache_lock);
}
/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list.  On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them.  The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system.
 */
void shrink_dcache_sb(struct super_block * sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		list_del(tmp);
		list_add(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		dentry_stat.nr_unused--;
		list_del_init(tmp);
		spin_lock(&dentry->d_lock);
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
}
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */
/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}
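/*
 * Illustrative sketch (assumption): a caller that wants to throw away a
 * directory dentry can refuse to do so while anything is mounted in the
 * subtree. example_can_drop_dir() is a hypothetical helper.
 */
#if 0
static int example_can_drop_dir(struct dentry *dentry)
{
	if (have_submounts(dentry))
		return 0;	/* something is mounted below, keep it */
	return d_invalidate(dentry) == 0;
}
#endif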
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (!list_empty(&dentry->d_lru)) {
			dentry_stat.nr_unused--;
			list_del_init(&dentry->d_lru);
		}
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			list_add(&dentry->d_lru, dentry_unused.prev);
			dentry_stat.nr_unused++;
			found++;
		}
		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
#ifdef DCACHE_DEBUG
			printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
			       dentry->d_parent->d_name.name, dentry->d_name.name, found);
#endif
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_child.next;
		this_parent = this_parent->d_parent;
#ifdef DCACHE_DEBUG
		printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
		       this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
#endif
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return found;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	int found;

	while ((found = select_parent(parent)) != 0)
		prune_dcache(found);
}
/**
 * shrink_dcache_anon - further prune the cache
 * @head: head of d_hash list of dentries to prune
 *
 * Prune the dentries that are anonymous
 *
 * parsing the d_hash list does not need read_barrier_depends() as it is
 * done under dcache_lock.
 */
void shrink_dcache_anon(struct hlist_head *head)
{
	struct hlist_node *lp;
	int found;
	do {
		found = 0;
		spin_lock(&dcache_lock);
		hlist_for_each(lp, head) {
			struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
			if (!list_empty(&this->d_lru)) {
				dentry_stat.nr_unused--;
				list_del(&this->d_lru);
			}

			/*
			 * move only zero ref count dentries to the end
			 * of the unused list for prune_dcache
			 */
			if (!atomic_read(&this->d_count)) {
				list_add_tail(&this->d_lru, &dentry_unused);
				dentry_stat.nr_unused++;
				found++;
			}
		}
		spin_unlock(&dcache_lock);
		prune_dcache(found);
	} while (found);
}
/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt.  One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(int nr, unsigned int gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}
	return dentry_stat.nr_unused;
}
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;
	dentry->d_lock = SPIN_LOCK_UNLOCKED;
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_extra_attributes = NULL;
	dentry->d_mounted = 0;
	dentry->d_cookie = NULL;
	dentry->d_bucket = NULL;
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else
		INIT_LIST_HEAD(&dentry->d_child);

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	if (!list_empty(&entry->d_alias)) BUG();
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
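/*
 * Illustrative sketch (assumption): allocating a named child dentry and
 * binding a freshly created inode to it, the way a simple filesystem's
 * create path might. example_attach_inode() and the "hello" name are
 * hypothetical.
 */
#if 0
static struct dentry *example_attach_inode(struct dentry *dir,
					   struct inode *inode)
{
	struct qstr name;
	struct dentry *dentry;

	name.name = "hello";
	name.len = 5;
	name.hash = full_name_hash(name.name, name.len);

	dentry = d_alloc(dir, &name);	/* copies the name, takes dget(dir) */
	if (!dentry)
		return NULL;

	d_instantiate(dentry, inode);	/* caller's inode ref now owned by dcache */
	return dentry;
}
#endif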
/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */
struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}
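/*
 * Illustrative sketch (assumption): the classic fill_super pattern - create
 * the root inode, then hand it to d_alloc_root(), remembering that on
 * failure the inode reference was not consumed. example_fill_super_root()
 * is a hypothetical helper.
 */
#if 0
static int example_fill_super_root(struct super_block *sb,
				   struct inode *root_inode)
{
	sb->s_root = d_alloc_root(root_inode);
	if (!sb->s_root) {
		iput(root_inode);	/* d_alloc_root did not take the ref */
		return -ENOMEM;
	}
	return 0;
}
#endif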
static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * This is similar to d_alloc_root.  It is used by filesystems when
 * creating a dentry for a given inode, often in the process of
 * mapping a filehandle to a dentry.  The returned dentry may be
 * anonymous, or may have a full name (if the inode was already
 * in the cache).  The file system may need to make further
 * efforts to connect this dentry into the dcache properly.
 *
 * When called on a directory inode, we must ensure that
 * the inode only ever has one dentry.  If a dentry is
 * found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  If %NULL is returned (indicating kmalloc failure),
 * the reference on the inode has not been released.
 */
struct dentry * d_alloc_anon(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if ((res = d_find_alias(inode))) {
		iput(inode);
		return res;
	}

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp)
		return NULL;

	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
		/* A directory can only have one dentry.
		 * This (now) has one, so use it.
		 */
		res = list_entry(inode->i_dentry.next, struct dentry, d_alias);
		__dget_locked(res);
	} else {
		/* attach a disconnected dentry */
		res = tmp;
		tmp = NULL;
		if (res) {
			spin_lock(&res->d_lock);
			res->d_sb = inode->i_sb;
			res->d_parent = res;
			res->d_inode = inode;

			/*
			 * Set d_bucket to an "impossible" bucket address so
			 * that d_move() doesn't get a false positive
			 */
			res->d_bucket = NULL;
			res->d_flags |= DCACHE_DISCONNECTED;
			res->d_flags &= ~DCACHE_UNHASHED;
			list_add(&res->d_alias, &inode->i_dentry);
			hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
			spin_unlock(&res->d_lock);
		}
		inode = NULL; /* don't drop reference */
	}
	spin_unlock(&dcache_lock);

	if (inode)
		iput(inode);
	if (tmp)
		dput(tmp);
	return res;
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		if (!list_empty(&inode->i_dentry)) {
			new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
			__dget_locked(new);
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* d_instantiate takes dcache_lock, so we do it by hand */
			list_add(&dentry->d_alias, &inode->i_dentry);
			dentry->d_inode = inode;
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
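/*
 * Illustrative sketch (assumption): the ->lookup() pattern of an exportable
 * filesystem. d_splice_alias() either reuses an existing disconnected
 * directory alias or d_add()s the (possibly negative) dentry.
 * example_iget() stands in for the filesystem's own inode lookup and is
 * hypothetical.
 */
#if 0
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     struct nameidata *nd)
{
	struct inode *inode = example_iget(dir, &dentry->d_name);

	if (IS_ERR(inode))
		return ERR_PTR(PTR_ERR(inode));
	return d_splice_alias(inode, dentry);	/* inode may be NULL */
}
#endif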
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned on failure.
 *
 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
 * Memory barriers are used while updating and doing lockless traversal.
 * To avoid races with d_move while rename is happening, d_lock is used.
 *
 * Overflows in memcmp() while d_move() is in progress are avoided by
 * keeping the length and name pointer in one structure pointed to by d_qstr.
 *
 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
 * lookup is going on.
 *
 * The dentry_unused list is not updated even if lookup finds the required
 * dentry in there. It is updated in places such as prune_dcache,
 * shrink_dcache_sb, select_parent and __dget_locked. This laziness saves
 * lookup from dcache_lock acquisition.
 *
 * d_lookup() is protected against concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
 */
struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	struct dentry * dentry = NULL;
	unsigned long seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
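/*
 * Illustrative sketch (assumption): looking a name up in the dcache only,
 * without touching the filesystem, and remembering to dput() the result.
 * Simplified: a filesystem-specific ->d_hash(), if any, would have to be
 * applied to the qstr first. example_cached_child() is hypothetical.
 */
#if 0
static int example_cached_child(struct dentry *dir, const char *s)
{
	struct qstr q;
	struct dentry *child;

	q.name = s;
	q.len = strlen(s);
	q.hash = full_name_hash(q.name, q.len);

	child = d_lookup(dir, &q);
	if (!child)
		return 0;	/* not in the dcache */
	dput(child);
	return 1;
}
#endif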
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;

	rcu_read_lock();

	hlist_for_each (node, head) {
		struct dentry *dentry;
		struct qstr *qstr;

		smp_read_barrier_depends();
		dentry = hlist_entry(node, struct dentry, d_hash);

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * If lookup ends up in a different bucket due to concurrent
		 * rename, fail it
		 */
		if (unlikely(dentry->d_bucket != head))
			goto terminate;

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things. Don't bother checking the hash because we're
		 * about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		qstr = &dentry->d_name;
		smp_read_barrier_depends();
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		if (!d_unhashed(dentry)) {
			atomic_inc(&dentry->d_count);
			found = dentry;
		}
terminate:
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 * @hash: Hash of the dentry
 * @len: Length of the name
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp,base) {
		/* read_barrier_depends() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
}
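/*
 * Illustrative sketch (assumption): the usual unlink-time sequence - once
 * the name has been removed on disk, d_delete() either turns the dentry
 * negative or unhashes it if it is still in use. example_unlink_tail() is
 * a hypothetical helper.
 */
#if 0
static void example_unlink_tail(struct dentry *dentry)
{
	/* the filesystem's ->unlink() has already removed the name */
	d_delete(dentry);	/* negative dentry, or unhash + delete later */
}
#endif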
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);

	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	entry->d_flags &= ~DCACHE_UNHASHED;
	spin_unlock(&entry->d_lock);
	entry->d_bucket = list;
	hlist_add_head_rcu(&entry->d_hash, list);
	spin_unlock(&dcache_lock);
}
#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			do_switch(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		}
	}
}
/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it got deleted rather than the name that
 * deleted it.
 */
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */
void d_move(struct dentry * dentry, struct dentry * target)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	spin_lock(&dcache_lock);
	write_seqlock(&rename_lock);
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock(&dentry->d_lock);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock(&target->d_lock);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (dentry->d_flags & DCACHE_UNHASHED)
		goto already_unhashed;
	if (dentry->d_bucket != target->d_bucket) {
		hlist_del_rcu(&dentry->d_hash);
already_unhashed:
		dentry->d_bucket = target->d_bucket;
		hlist_add_head_rcu(&dentry->d_hash, target->d_bucket);
		dentry->d_flags &= ~DCACHE_UNHASHED;
	}

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	/* flush any possible attributes */
	if (dentry->d_extra_attributes) {
		kfree(dentry->d_extra_attributes);
		dentry->d_extra_attributes = NULL;
	}
	if (target->d_extra_attributes) {
		kfree(target->d_extra_attributes);
		target->d_extra_attributes = NULL;
	}

	list_del(&dentry->d_child);
	list_del(&target->d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_name.len, target->d_name.len);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_child);
	} else {
		do_switch(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
	spin_unlock(&dcache_lock);
}
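/*
 * Illustrative sketch (assumption): how a rename path uses d_move() once
 * the on-disk rename has succeeded - the source dentry takes over the
 * target's name and parent, and the target gets unhashed.
 * example_rename_tail() is a hypothetical wrapper.
 */
#if 0
static void example_rename_tail(struct dentry *old_dentry,
				struct dentry *new_dentry)
{
	/* on-disk rename already succeeded; now fix up the dcache */
	d_move(old_dentry, new_dentry);
}
#endif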
/**
 * d_path - return the path of a dentry
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @root: root dentry
 * @rootmnt: vfsmnt to which the root dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive. Caller holds the dcache_lock.
 */
char * __d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
		struct dentry *root, struct vfsmount *rootmnt,
		char *buffer, int buflen)
{
	char * end = buffer+buflen;
	char * retval;
	int namelen;

	*--end = '\0';
	buflen--;
	if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
		buflen -= 10;
		end -= 10;
		if (buflen < 0)
			goto Elong;
		memcpy(end, " (deleted)", 10);
	}

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry * parent;

		if (dentry == root && vfsmnt == rootmnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			spin_lock(&vfsmount_lock);
			if (vfsmnt->mnt_parent == vfsmnt) {
				spin_unlock(&vfsmount_lock);
				goto global_root;
			}
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			spin_unlock(&vfsmount_lock);
			continue;
		}
		parent = dentry->d_parent;
		namelen = dentry->d_name.len;
		buflen -= namelen + 1;
		if (buflen < 0)
			goto Elong;
		end -= namelen;
		memcpy(end, dentry->d_name.name, namelen);
		*--end = '/';
		retval = end;
		dentry = parent;
	}

	return retval;

global_root:
	namelen = dentry->d_name.len;
	buflen -= namelen;
	if (buflen < 0)
		goto Elong;
	retval -= namelen-1;	/* hit the slash */
	memcpy(retval, dentry->d_name.name, namelen);
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}

EXPORT_SYMBOL_GPL(__d_path);
/* write full pathname into buffer and return start of pathname */
char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
		char *buf, int buflen)
{
	char *res;
	struct vfsmount *rootmnt;
	struct dentry *root;

	read_lock(&current->fs->lock);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);
	return res;
}
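/*
 * Illustrative sketch (assumption): turning an open file into a pathname
 * with d_path(), using a whole page as scratch space. example_print_path()
 * is a hypothetical helper.
 */
#if 0
static void example_print_path(struct file *file)
{
	char *page = (char *) __get_free_page(GFP_KERNEL);
	char *p;

	if (!page)
		return;
	p = d_path(file->f_dentry, file->f_vfsmnt, page, PAGE_SIZE);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "path: %s\n", p);
	free_page((unsigned long) page);
}
#endif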
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
{
	int error;
	struct vfsmount *pwdmnt, *rootmnt;
	struct dentry *pwd, *root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	read_lock(&current->fs->lock);
	pwdmnt = mntget(current->fs->pwdmnt);
	pwd = dget(current->fs->pwd);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
		unsigned long len;
		char * cwd;

		cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = PTR_ERR(cwd);
		if (IS_ERR(cwd))
			goto out;

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);

out:
	dput(pwd);
	mntput(pwdmnt);
	dput(root);
	mntput(rootmnt);
	free_page((unsigned long) page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
{
	int result;
	struct dentry * saved = new_dentry;
	unsigned long seq;

	result = 0;
	/* need rcu_readlock to protect against the d_parent trashing due to
	 * d_move
	 */
	rcu_read_lock();
	do {
		/* for restarting inner loop in case of seq retry */
		new_dentry = saved;
		result = 0;
		seq = read_seqbegin(&rename_lock);
		for (;;) {
			if (new_dentry != old_dentry) {
				struct dentry * parent = new_dentry->d_parent;
				if (parent == new_dentry)
					break;
				new_dentry = parent;
				continue;
			}
			result = 1;
			break;
		}
	} while (read_seqretry(&rename_lock, seq));
	rcu_read_unlock();

	return result;
}
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		if (d_unhashed(dentry)||!dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}
/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */
ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry * dentry;
	ino_t ino = 0;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) != 0)
			goto out;
	}

	dentry = d_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
out:
	return ino;
}
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
void flush_dentry_attributes (void)
{
	struct hlist_node *tmp;
	struct dentry *dentry;
	int i;

	spin_lock(&dcache_lock);
	for (i = 0; i <= d_hash_mask; i++)
		hlist_for_each_entry(dentry, tmp, dentry_hashtable+i, d_hash) {
			kfree(dentry->d_extra_attributes);
			dentry->d_extra_attributes = NULL;
		}
	spin_unlock(&dcache_lock);
}

EXPORT_SYMBOL_GPL(flush_dentry_attributes);
static void __init dcache_init(unsigned long mempages)
{
	struct hlist_head *d;
	unsigned long order;
	unsigned int nr_hash;
	int i;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = kmem_cache_create("dentry_cache",
					 sizeof(struct dentry),
					 0,
					 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
					 NULL, NULL);

	set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);

	if (!dhash_entries)
		dhash_entries = PAGE_SHIFT < 13 ?
				mempages >> (13 - PAGE_SHIFT) :
				mempages << (PAGE_SHIFT - 13);

	dhash_entries *= sizeof(struct hlist_head);
	for (order = 0; ((1UL << order) << PAGE_SHIFT) < dhash_entries; order++)
		;

	do {
		unsigned long tmp;

		nr_hash = (1UL << order) * PAGE_SIZE /
			sizeof(struct hlist_head);
		d_hash_mask = (nr_hash - 1);

		tmp = nr_hash;
		d_hash_shift = 0;
		while ((tmp >>= 1UL) != 0UL)
			d_hash_shift++;

		dentry_hashtable = (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dentry_hashtable == NULL && --order >= 0);

	printk(KERN_INFO "Dentry cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	if (!dentry_hashtable)
		panic("Failed to allocate dcache hash table\n");

	d = dentry_hashtable;
	i = nr_hash;
	do {
		INIT_HLIST_HEAD(d);
		d++;
		i--;
	} while (i);
}
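/*
 * Worked example of the sizing above (assumption, for illustration only):
 * with 4KB pages (PAGE_SHIFT == 12) and mempages == 65536 (256MB),
 * dhash_entries defaults to 65536 >> 1 = 32768 buckets. Scaled by
 * sizeof(struct hlist_head) (4 bytes on 32-bit) that is 128KB, i.e.
 * order 5 (32 pages), giving nr_hash = 32768, d_hash_mask = 32767 and
 * d_hash_shift = 15.
 */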
/* SLAB cache for __getname() consumers */
kmem_cache_t *names_cachep;

/* SLAB cache for file structures */
kmem_cache_t *filp_cachep;

EXPORT_SYMBOL(d_genocide);

extern void bdev_cache_init(void);
extern void chrdev_init(void);
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);

	dcache_init(mempages);
	inode_init(mempages);
	files_init(mempages);
	bdev_cache_init();
	chrdev_init();
}
EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_anon);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(is_subdir);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_anon);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);