diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index a62d941..b621521 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
 #include <linux/rwsem.h>
 #include <linux/sched.h>
 #include <linux/namei.h>
+#include <linux/bitops.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
-#include <asm/bitops.h>
 #include <asm/atomic.h>
 
 #define DEVFS_VERSION            "2004-01-31"
@@ -831,7 +831,7 @@ static kmem_cache_t *devfsd_buf_cache;
 #ifdef CONFIG_DEVFS_DEBUG
 static unsigned int devfs_debug_init __initdata = DEBUG_NONE;
 static unsigned int devfs_debug = DEBUG_NONE;
-static spinlock_t stat_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(stat_lock);
 static unsigned int stat_num_entries;
 static unsigned int stat_num_bytes;
 #endif
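
This hunk, and the similar ones below, replace the open-coded SPIN_LOCK_UNLOCKED initializer with DEFINE_SPINLOCK(), which declares and statically initializes the lock in one statement and, in later kernels, lets lock debugging identify the lock by name. A minimal sketch of the resulting pattern; the stat_account_entry() helper is illustrative only, not part of the patch:

#include <linux/spinlock.h>

/* Old form:  static spinlock_t stat_lock = SPIN_LOCK_UNLOCKED;  */
static DEFINE_SPINLOCK(stat_lock);      /* declares and initializes the lock */
static unsigned int stat_num_entries;

/* Illustrative helper: bump a counter under the statically defined lock. */
static void stat_account_entry(void)
{
        spin_lock(&stat_lock);
        stat_num_entries++;
        spin_unlock(&stat_lock);
}
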
@@ -966,7 +966,7 @@ static struct devfs_entry *_devfs_alloc_entry(const char *name,
 {
        struct devfs_entry *new;
        static unsigned long inode_counter = FIRST_INODE;
-       static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
+       static DEFINE_SPINLOCK(counter_lock);
 
        if (name && (namelen < 1))
                namelen = strlen(name);
@@ -1063,7 +1063,7 @@ static int _devfs_append_entry(devfs_handle_t dir, devfs_handle_t de,
 static struct devfs_entry *_devfs_get_root_entry(void)
 {
        struct devfs_entry *new;
-       static spinlock_t root_lock = SPIN_LOCK_UNLOCKED;
+       static DEFINE_SPINLOCK(root_lock);
 
        if (root_entry)
                return root_entry;
@@ -1802,7 +1802,6 @@ static int __init devfs_setup(char *str)
 
 __setup("devfs=", devfs_setup);
 
-EXPORT_SYMBOL(devfs_mk_symlink);
 EXPORT_SYMBOL(devfs_mk_dir);
 EXPORT_SYMBOL(devfs_remove);
 
@@ -2163,27 +2162,27 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
         *
         * make sure that
         *   d_instantiate always runs under lock
-        *   we release i_sem lock before going to sleep
+        *   we release i_mutex lock before going to sleep
         *
         * unfortunately sometimes d_revalidate is called with
-        * and sometimes without i_sem lock held. The following checks
+        * and sometimes without i_mutex lock held. The following checks
         * attempt to deduce when we need to add (and drop resp.) lock
         * here. This relies on current (2.6.2) calling conventions:
         *
-        *   lookup_hash is always run under i_sem and is passing NULL
+        *   lookup_hash is always run under i_mutex and is passing NULL
         *   as nd
         *
-        *   open(...,O_CREATE,...) calls _lookup_hash under i_sem
+        *   open(...,O_CREATE,...) calls _lookup_hash under i_mutex
         *   and sets flags to LOOKUP_OPEN|LOOKUP_CREATE
         *
         *   all other invocations of ->d_revalidate seem to happen
-        *   outside of i_sem
+        *   outside of i_mutex
         */
        need_lock = nd &&
            (!(nd->flags & LOOKUP_CREATE) || (nd->flags & LOOKUP_PARENT));
 
        if (need_lock)
-               down(&dir->i_sem);
+               mutex_lock(&dir->i_mutex);
 
        if (is_devfsd_or_child(fs_info)) {
                devfs_handle_t de = lookup_info->de;
@@ -2222,9 +2221,9 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
                add_wait_queue(&lookup_info->wait_queue, &wait);
                read_unlock(&parent->u.dir.lock);
                /* at this point it is always (hopefully) locked */
-               up(&dir->i_sem);
+               mutex_unlock(&dir->i_mutex);
                schedule();
-               down(&dir->i_sem);
+               mutex_lock(&dir->i_mutex);
                /*
                 * This does not need nor should remove wait from wait_queue.
                 * Wait queue head is never reused - nothing is ever added to it
@@ -2239,7 +2238,7 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
 
       out:
        if (need_lock)
-               up(&dir->i_sem);
+               mutex_unlock(&dir->i_mutex);
        return 1;
 }                              /*  End Function devfs_d_revalidate_wait  */
 
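
The down()/up() to mutex_lock()/mutex_unlock() changes above track the conversion of the per-inode i_sem semaphore to a struct mutex (i_mutex). The point the long comment makes is that devfs_d_revalidate_wait() must drop the lock around schedule() and re-take it afterwards; a hedged sketch of that pattern, with an illustrative helper name and the wait-queue setup elided:

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Illustration only: sleep without holding the directory's i_mutex. */
static void sleep_with_dir_unlocked(struct inode *dir)
{
        mutex_lock(&dir->i_mutex);        /* was: down(&dir->i_sem)         */

        /* ... put ourselves on a wait queue and set the task state ... */

        mutex_unlock(&dir->i_mutex);      /* was: up(&dir->i_sem)           */
        schedule();                       /* sleep with the lock released   */
        mutex_lock(&dir->i_mutex);        /* re-acquire before touching dir */

        /* ... finish the lookup under the lock ... */

        mutex_unlock(&dir->i_mutex);
}
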
@@ -2285,9 +2284,9 @@ static struct dentry *devfs_lookup(struct inode *dir, struct dentry *dentry,
        /*  Unlock directory semaphore, which will release any waiters. They
           will get the hashed dentry, and may be forced to wait for
           revalidation  */
-       up(&dir->i_sem);
+       mutex_unlock(&dir->i_mutex);
        wait_for_devfsd_finished(fs_info);      /*  If I'm not devfsd, must wait  */
-       down(&dir->i_sem);      /*  Grab it again because them's the rules  */
+       mutex_lock(&dir->i_mutex);      /*  Grab it again because them's the rules  */
        de = lookup_info.de;
        /*  If someone else has been so kind as to make the inode, we go home
           early  */
@@ -2492,11 +2491,11 @@ static int devfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
        return 0;
 }                              /*  End Function devfs_mknod  */
 
-static int devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct devfs_entry *p = get_devfs_entry_from_vfs_inode(dentry->d_inode);
        nd_set_link(nd, p ? p->u.symlink.linkname : ERR_PTR(-ENODEV));
-       return 0;
+       return NULL;
 }                              /*  End Function devfs_follow_link  */
 
 static struct inode_operations devfs_iops = {
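
The ->follow_link change matches the VFS prototype that returns an opaque cookie, which the VFS later passes to ->put_link(); a filesystem with no per-call state to release simply returns NULL, as devfs does here. A sketch of the contract with illustrative names (devfs itself stores the target in its own entry structure, not in d_fsdata):

#include <linux/fs.h>
#include <linux/namei.h>

/* Illustration: publish the symlink target and return no cookie. */
static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        nd_set_link(nd, (char *)dentry->d_fsdata);   /* hand target to the VFS   */
        return NULL;                                 /* nothing for ->put_link() */
}
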
@@ -2534,6 +2533,7 @@ static int devfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_blocksize_bits = 10;
        sb->s_magic = DEVFS_SUPER_MAGIC;
        sb->s_op = &devfs_sops;
+       sb->s_time_gran = 1;
        if ((root_inode = _devfs_get_vfs_inode(sb, root_entry, NULL)) == NULL)
                goto out_no_root;
        sb->s_root = d_alloc_root(root_inode);
@@ -2683,7 +2683,7 @@ static int devfsd_ioctl(struct inode *inode, struct file *file,
                   work even if the global kernel lock were to be removed, because it
                   doesn't matter who gets in first, as long as only one gets it  */
                if (fs_info->devfsd_task == NULL) {
-                       static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+                       static DEFINE_SPINLOCK(lock);
 
                        if (!spin_trylock(&lock))
                                return -EBUSY;
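
The comment kept in this hunk states the intent: devfsd registration must have exactly one winner even without relying on the big kernel lock, which is why a function-local static spinlock plus spin_trylock() guards the assignment. A hedged sketch of that claim-once pattern, with illustrative names:

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/errno.h>

static struct task_struct *daemon_task;         /* example state */

/* Illustration: the first caller through the trylock claims the daemon slot. */
static int claim_daemon(void)
{
        static DEFINE_SPINLOCK(lock);

        if (!spin_trylock(&lock))
                return -EBUSY;                  /* a racing caller is registering */
        if (daemon_task == NULL)
                daemon_task = current;
        spin_unlock(&lock);

        return daemon_task == current ? 0 : -EBUSY;
}
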
@@ -2738,10 +2738,8 @@ static int devfsd_close(struct inode *inode, struct file *file)
        entry = fs_info->devfsd_first_event;
        fs_info->devfsd_first_event = NULL;
        fs_info->devfsd_last_event = NULL;
-       if (fs_info->devfsd_info) {
-               kfree(fs_info->devfsd_info);
-               fs_info->devfsd_info = NULL;
-       }
+       kfree(fs_info->devfsd_info);
+       fs_info->devfsd_info = NULL;
        spin_unlock(&fs_info->devfsd_buffer_lock);
        fs_info->devfsd_pgrp = 0;
        fs_info->devfsd_task = NULL;
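
The last hunk relies on kfree(NULL) being a defined no-op, so the explicit NULL check is redundant and the pointer can be freed and cleared unconditionally. A minimal illustration with a hypothetical structure:

#include <linux/slab.h>

struct example_state {
        void *info;             /* may legitimately be NULL */
};

/* kfree() tolerates NULL, so no guard is needed before freeing. */
static void example_release(struct example_state *s)
{
        kfree(s->info);
        s->info = NULL;         /* avoid leaving a dangling pointer */
}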