X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fnamei.c;fp=fs%2Fnamei.c;h=49ce7ff26095b32e14ed186ab9b24307ec24c010;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=b9d0804bb3833d38a4b312001e19e92fd921b542;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/fs/namei.c b/fs/namei.c
index b9d0804bb..49ce7ff26 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -107,7 +107,7 @@
  */
 
 /*
  * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
- * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
+ * implemented. Let's see if raised priority of ->s_vfs_rename_sem gives
  * any extra contention...
  */
@@ -162,7 +162,7 @@ char * getname(const char __user * filename)
 #ifdef CONFIG_AUDITSYSCALL
 void putname(const char *name)
 {
-	if (unlikely(!audit_dummy_context()))
+	if (unlikely(current->audit_context))
 		audit_putname(name);
 	else
 		__putname(name);
@@ -230,7 +230,7 @@ int generic_permission(struct inode *inode, int mask,
 
 static inline int vx_barrier(struct inode *inode)
 {
-	if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN|VX_WATCH)) {
+	if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN|VX_IDENT)) {
 		vxwprintk(1, "xid=%d did hit the barrier.",
 			vx_current_xid());
 		return 1;
@@ -255,10 +255,10 @@ static inline int xid_permission(struct inode *inode, int mask, struct nameidata
 
 int permission(struct inode *inode, int mask, struct nameidata *nd)
 {
-	umode_t mode = inode->i_mode;
 	int retval, submask;
 
 	if (mask & MAY_WRITE) {
+		umode_t mode = inode->i_mode;
 
 		/*
 		 * Nobody gets write access to a read-only fs.
@@ -275,13 +275,6 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
 	}
 
 
-	/*
-	 * MAY_EXEC on regular files requires special handling: We override
-	 * filesystem execute permissions if the mode bits aren't set.
-	 */
-	if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
-		return -EACCES;
-
 	/* Ordinary permission routines do not understand MAY_APPEND. */
 	submask = mask & ~MAY_APPEND;
 	if ((retval = xid_permission(inode, mask, nd)))
@@ -402,29 +395,6 @@ void release_open_intent(struct nameidata *nd)
 		fput(nd->intent.open.file);
 }
 
-static inline struct dentry *do_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-	int status = dentry->d_op->d_revalidate(dentry, nd);
-	if (unlikely(status <= 0)) {
-		/*
-		 * The dentry failed validation.
-		 * If d_revalidate returned 0 attempt to invalidate
-		 * the dentry otherwise d_revalidate is asking us
-		 * to return a fail status.
-		 */
-		if (!status) {
-			if (!d_invalidate(dentry)) {
-				dput(dentry);
-				dentry = NULL;
-			}
-		} else {
-			dput(dentry);
-			dentry = ERR_PTR(status);
-		}
-	}
-	return dentry;
-}
-
 /*
  * Internal lookup() using the new generic dcache.
  * SMP-safe
@@ -439,9 +409,12 @@ static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name,
 	if (!dentry)
 		dentry = d_lookup(parent, name);
 
-	if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
-		dentry = do_revalidate(dentry, nd);
-
+	if (dentry && dentry->d_op && dentry->d_op->d_revalidate) {
+		if (!dentry->d_op->d_revalidate(dentry, nd) && !d_invalidate(dentry)) {
+			dput(dentry);
+			dentry = NULL;
+		}
+	}
 	return dentry;
 }
 
@@ -536,9 +509,10 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
 	 */
 	mutex_unlock(&dir->i_mutex);
 	if (result->d_op && result->d_op->d_revalidate) {
-		result = do_revalidate(result, nd);
-		if (!result)
+		if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
+			dput(result);
 			result = ERR_PTR(-ENOENT);
+		}
 	}
 	return result;
 }
@@ -604,22 +578,6 @@ struct path {
 	struct dentry *dentry;
 };
 
-static inline void dput_path(struct path *path, struct nameidata *nd)
-{
-	dput(path->dentry);
-	if (path->mnt != nd->mnt)
-		mntput(path->mnt);
-}
-
-static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
-{
-	dput(nd->dentry);
-	if (nd->mnt != path->mnt)
-		mntput(nd->mnt);
-	nd->mnt = path->mnt;
-	nd->dentry = path->dentry;
-}
-
 static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
 {
 	int error;
@@ -629,11 +587,8 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
 	touch_atime(path->mnt, dentry);
 	nd_set_link(nd, NULL);
 
-	if (path->mnt != nd->mnt) {
-		path_to_nameidata(path, nd);
-		dget(dentry);
-	}
-	mntget(path->mnt);
+	if (path->mnt == nd->mnt)
+		mntget(path->mnt);
 	cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
 	error = PTR_ERR(cookie);
 	if (!IS_ERR(cookie)) {
@@ -650,6 +605,22 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
 	return error;
 }
 
+static inline void dput_path(struct path *path, struct nameidata *nd)
+{
+	dput(path->dentry);
+	if (path->mnt != nd->mnt)
+		mntput(path->mnt);
+}
+
+static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
+{
+	dput(nd->dentry);
+	if (nd->mnt != path->mnt)
+		mntput(nd->mnt);
+	nd->mnt = path->mnt;
+	nd->dentry = path->dentry;
+}
+
 /*
  * This limits recursive symlink follows to 8, while
  * limiting consecutive symlinks to 40.
@@ -797,7 +768,7 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
  * It _is_ time-critical.
  */
 static int do_lookup(struct nameidata *nd, struct qstr *name,
-		     struct path *path, int atomic)
+		     struct path *path)
 {
 	struct vfsmount *mnt = nd->mnt;
 	struct dentry *dentry = __d_lookup(nd->dentry, name);
@@ -807,10 +778,9 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
 		goto need_lookup;
 	if (dentry->d_op && dentry->d_op->d_revalidate)
 		goto need_revalidate;
-done:
 	inode = dentry->d_inode;
 	if (!inode)
-		goto no_inode;
+		goto done;
 	if (!vx_check(inode->i_xid, VX_WATCH|VX_ADMIN|VX_HOSTID|VX_IDENT))
 		goto hidden;
 	if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) {
@@ -819,7 +789,7 @@ done:
 		if (de && !vx_hide_check(0, de->vx_flags))
 			goto hidden;
 	}
-no_inode:
+done:
 	path->mnt = mnt;
 	path->dentry = dentry;
 	__follow_mount(path);
@@ -832,22 +802,18 @@ hidden:
 	return -ENOENT;
 
 need_lookup:
-	if (atomic)
-		return -EWOULDBLOCKIO;
 	dentry = real_lookup(nd->dentry, name, nd);
 	if (IS_ERR(dentry))
 		goto fail;
 	goto done;
 
 need_revalidate:
-	if (atomic)
-		return -EWOULDBLOCKIO;
-	dentry = do_revalidate(dentry, nd);
-	if (!dentry)
-		goto need_lookup;
-	if (IS_ERR(dentry))
-		goto fail;
-	goto done;
+	if (dentry->d_op->d_revalidate(dentry, nd))
+		goto done;
+	if (d_invalidate(dentry))
+		goto done;
+	dput(dentry);
+	goto need_lookup;
 
 fail:
 	return PTR_ERR(dentry);
@@ -865,11 +831,9 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
 {
 	struct path next;
 	struct inode *inode;
-	int err, atomic;
+	int err;
 	unsigned int lookup_flags = nd->flags;
-
-	atomic = (lookup_flags & LOOKUP_ATOMIC);
-
+	
 	while (*name=='/')
 		name++;
 	if (!*name)
@@ -938,7 +902,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
 				break;
 		}
 		/* This does the actual lookups.. */
-		err = do_lookup(nd, &this, &next, atomic);
+		err = do_lookup(nd, &this, &next);
 		if (err)
 			break;
 
@@ -993,7 +957,7 @@ last_component:
 			if (err < 0)
 				break;
 		}
-		err = do_lookup(nd, &this, &next, atomic);
+		err = do_lookup(nd, &this, &next);
 		if (err)
 			break;
 		inode = next.dentry->d_inode;
@@ -1209,9 +1173,9 @@ static int fastcall do_path_lookup(int dfd, const char *name,
 	retval = link_path_walk(name, nd);
 out:
 	if (likely(retval == 0)) {
-		if (unlikely(!audit_dummy_context() && nd && nd->dentry &&
+		if (unlikely(current->audit_context && nd && nd->dentry &&
 				nd->dentry->d_inode))
-		audit_inode(name, nd->dentry->d_inode);
+		audit_inode(name, nd->dentry->d_inode, flags);
 	}
 out_fail:
 	return retval;
@@ -1339,7 +1303,7 @@ out:
 	return dentry;
 }
 
-static struct dentry *lookup_hash(struct nameidata *nd)
+struct dentry * lookup_hash(struct nameidata *nd)
 {
 	return __lookup_hash(&nd->last, nd->dentry, nd);
 }
@@ -1442,7 +1406,6 @@ static int may_delete(struct inode *dir, struct dentry *victim,
 		return -ENOENT;
 
 	BUG_ON(victim->d_parent->d_inode != dir);
-	audit_inode_child(victim->d_name.name, victim->d_inode, dir);
 
 	error = permission(dir,MAY_WRITE | MAY_EXEC, nd);
 	if (error)
@@ -1496,8 +1459,6 @@ static inline int lookup_flags(unsigned int f)
 	if (f & O_DIRECTORY)
 		retval |= LOOKUP_DIRECTORY;
 
-	if (f & O_ATOMICLOOKUP)
-		retval |= LOOKUP_ATOMIC;
 	return retval;
 }
 
@@ -1510,30 +1471,30 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
 	struct dentry *p;
 
 	if (p1 == p2) {
-		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
+		mutex_lock(&p1->d_inode->i_mutex);
 		return NULL;
 	}
 
-	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
+	down(&p1->d_inode->i_sb->s_vfs_rename_sem);
 
 	for (p = p1; p->d_parent != p; p = p->d_parent) {
 		if (p->d_parent == p2) {
-			mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
-			mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
+			mutex_lock(&p2->d_inode->i_mutex);
+			mutex_lock(&p1->d_inode->i_mutex);
 			return p;
 		}
 	}
 
 	for (p = p2; p->d_parent != p; p = p->d_parent) {
 		if (p->d_parent == p1) {
-			mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
-			mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
+			mutex_lock(&p1->d_inode->i_mutex);
+			mutex_lock(&p2->d_inode->i_mutex);
 			return p;
 		}
 	}
 
-	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
-	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
+	mutex_lock(&p1->d_inode->i_mutex);
+	mutex_lock(&p2->d_inode->i_mutex);
 	return NULL;
 }
 
@@ -1542,7 +1503,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
 	mutex_unlock(&p1->d_inode->i_mutex);
 	if (p1 != p2) {
 		mutex_unlock(&p2->d_inode->i_mutex);
-		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
+		up(&p1->d_inode->i_sb->s_vfs_rename_sem);
 	}
 }
 
@@ -1564,7 +1525,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
 	DQUOT_INIT(dir);
 	error = dir->i_op->create(dir, dentry, mode, nd);
 	if (!error)
-		fsnotify_create(dir, dentry);
+		fsnotify_create(dir, dentry->d_name.name);
 	return error;
 }
 
@@ -1747,7 +1708,6 @@ do_last:
 	 * It already exists.
 	 */
 	mutex_unlock(&dir->d_inode->i_mutex);
-	audit_inode_update(path.dentry->d_inode);
 
 	error = -EEXIST;
 	if (flag & O_EXCL)
@@ -1758,7 +1718,6 @@ do_last:
 		if (flag & O_NOFOLLOW)
 			goto exit_dput;
 	}
-
 	error = -ENOENT;
 	if (!path.dentry->d_inode)
 		goto exit_dput;
@@ -1847,7 +1806,7 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
 {
 	struct dentry *dentry = ERR_PTR(-EEXIST);
 
-	mutex_lock_nested(&nd->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+	mutex_lock(&nd->dentry->d_inode->i_mutex);
 	/*
 	 * Yucky last component or no last component at all?
 	 * (foo/., foo/.., /////)
@@ -1855,8 +1814,6 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
 	if (nd->last_type != LAST_NORM)
 		goto fail;
 	nd->flags &= ~LOOKUP_PARENT;
-	nd->flags |= LOOKUP_CREATE;
-	nd->intent.open.flags = O_EXCL;
 
 	/*
 	 * Do the final lookup.
@@ -1903,7 +1860,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry,
 	DQUOT_INIT(dir);
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
 	if (!error)
-		fsnotify_create(dir, dentry);
+		fsnotify_create(dir, dentry->d_name.name);
 	return error;
 }
 
@@ -1982,7 +1939,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry,
 	DQUOT_INIT(dir);
 	error = dir->i_op->mkdir(dir, dentry, mode);
 	if (!error)
-		fsnotify_mkdir(dir, dentry);
+		fsnotify_mkdir(dir, dentry->d_name.name);
 	return error;
 }
 
@@ -2111,7 +2068,7 @@ static long do_rmdir(int dfd, const char __user *pathname)
 			error = -EBUSY;
 			goto exit1;
 	}
-	mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+	mutex_lock(&nd.dentry->d_inode->i_mutex);
 	dentry = lookup_hash(&nd);
 	error = PTR_ERR(dentry);
 	if (!IS_ERR(dentry)) {
@@ -2186,7 +2143,7 @@ static long do_unlinkat(int dfd, const char __user *pathname)
 	error = -EISDIR;
 	if (nd.last_type != LAST_NORM)
 		goto exit1;
-	mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+	mutex_lock(&nd.dentry->d_inode->i_mutex);
 	dentry = lookup_hash(&nd);
 	error = PTR_ERR(dentry);
 	if (!IS_ERR(dentry)) {
@@ -2249,7 +2206,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry,
 	DQUOT_INIT(dir);
 	error = dir->i_op->symlink(dir, dentry, oldname);
 	if (!error)
-		fsnotify_create(dir, dentry);
+		fsnotify_create(dir, dentry->d_name.name);
 	return error;
 }
 
@@ -2328,7 +2285,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir,
 	error = dir->i_op->link(old_dentry, dir, new_dentry);
 	mutex_unlock(&old_dentry->d_inode->i_mutex);
 	if (!error)
-		fsnotify_create(dir, new_dentry);
+		fsnotify_create(dir, new_dentry->d_name.name);
 	return error;
 }
 
@@ -2350,16 +2307,14 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
 	int error;
 	char * to;
 
-	if ((flags & ~AT_SYMLINK_FOLLOW) != 0)
+	if (flags != 0)
 		return -EINVAL;
 
 	to = getname(newname);
 	if (IS_ERR(to))
 		return PTR_ERR(to);
 
-	error = __user_walk_fd(olddfd, oldname,
-			       flags & AT_SYMLINK_FOLLOW ? LOOKUP_FOLLOW : 0,
-			       &old_nd);
+	error = __user_walk_fd(olddfd, oldname, 0, &old_nd);
 	if (error)
 		goto exit;
 	error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
@@ -2398,17 +2353,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
  *	a) we can get into loop creation. Check is done in is_subdir().
  *	b) race potential - two innocent renames can create a loop together.
  *	   That's where 4.4 screws up. Current fix: serialization on
- *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
+ *	   sb->s_vfs_rename_sem. We might be more accurate, but that's another
  *	   story.
 *	c) we have to lock _three_ objects - parents and victim (if it exists).
 *	   And that - after we got ->i_mutex on parents (until then we don't know
 *	   whether the target exists). Solution: try to be smart with locking
 *	   order for inodes. We rely on the fact that tree topology may change
- *	   only under ->s_vfs_rename_mutex _and_ that parent of the object we
+ *	   only under ->s_vfs_rename_sem _and_ that parent of the object we
 *	   move will be locked. Thus we can rank directories by the tree
 *	   (ancestors first) and rank all non-directories after them.
 *	   That works since everybody except rename does "lock parent, lookup,
- *	   lock child" and rename is under ->s_vfs_rename_mutex.
+ *	   lock child" and rename is under ->s_vfs_rename_sem.
 *	   HOWEVER, it relies on the assumption that any object with ->lookup()
 *	   has no more than 1 dentry. If "hybrid" objects will ever appear,
 *	   we'd better make sure that there's no link(2) for them.
@@ -2690,7 +2645,8 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
 {
 	struct page * page;
 	struct address_space *mapping = dentry->d_inode->i_mapping;
-	page = read_mapping_page(mapping, 0, NULL);
+	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
+				NULL);
 	if (IS_ERR(page))
 		goto sync_fail;
 	wait_on_page_locked(page);
@@ -2744,27 +2700,16 @@ int __page_symlink(struct inode *inode, const char *symname, int len,
 	int err = -ENOMEM;
 	char *kaddr;
 
-retry:
 	page = find_or_create_page(mapping, 0, gfp_mask);
 	if (!page)
 		goto fail;
 	err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
-	if (err == AOP_TRUNCATED_PAGE) {
-		page_cache_release(page);
-		goto retry;
-	}
 	if (err)
 		goto fail_map;
 	kaddr = kmap_atomic(page, KM_USER0);
 	memcpy(kaddr, symname, len-1);
 	kunmap_atomic(kaddr, KM_USER0);
-	err = mapping->a_ops->commit_write(NULL, page, 0, len-1);
-	if (err == AOP_TRUNCATED_PAGE) {
-		page_cache_release(page);
-		goto retry;
-	}
-	if (err)
-		goto fail_map;
+	mapping->a_ops->commit_write(NULL, page, 0, len-1);
 	/*
 	 * Notice that we are _not_ going to block here - end of page is
 	 * unmapped, so this will only try to map the rest of page, see
@@ -2774,8 +2719,7 @@ retry:
 	 */
 	if (!PageUptodate(page)) {
 		err = mapping->a_ops->readpage(NULL, page);
-		if (err != AOP_TRUNCATED_PAGE)
-			wait_on_page_locked(page);
+		wait_on_page_locked(page);
 	} else {
 		unlock_page(page);
 	}
@@ -2810,6 +2754,7 @@ EXPORT_SYMBOL(follow_up);
 EXPORT_SYMBOL(get_write_access);	/* binfmt_aout */
 EXPORT_SYMBOL(getname);
 EXPORT_SYMBOL(lock_rename);
+EXPORT_SYMBOL(lookup_hash);
 EXPORT_SYMBOL(lookup_one_len);
 EXPORT_SYMBOL(page_follow_link_light);
 EXPORT_SYMBOL(page_put_link);