#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/namei.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/bitops.h>
#include <asm/atomic.h>
#define DEVFS_VERSION "2004-01-31"
#ifdef CONFIG_DEVFS_DEBUG
static unsigned int devfs_debug_init __initdata = DEBUG_NONE;
static unsigned int devfs_debug = DEBUG_NONE;
-static spinlock_t stat_lock = SPIN_LOCK_UNLOCKED;
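+/* DEFINE_SPINLOCK() declares and statically initializes the lock in one
+ * step, replacing the deprecated SPIN_LOCK_UNLOCKED initializer. */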
+static DEFINE_SPINLOCK(stat_lock);
static unsigned int stat_num_entries;
static unsigned int stat_num_bytes;
#endif
#ifdef CONFIG_DEVFS_DEBUG
static ssize_t stat_read(struct file *file, char __user *buf, size_t len,
loff_t * ppos);
-static struct file_operations stat_fops = {
+static const struct file_operations stat_fops = {
+ .open = nonseekable_open,
.read = stat_read,
};
#endif
/* Devfs daemon file operations */
-static struct file_operations devfsd_fops = {
+static const struct file_operations devfsd_fops = {
+ .open = nonseekable_open,
.read = devfsd_read,
.ioctl = devfsd_ioctl,
.release = devfsd_close,
{
struct devfs_entry *new;
static unsigned long inode_counter = FIRST_INODE;
- static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(counter_lock);
if (name && (namelen < 1))
namelen = strlen(name);
static struct devfs_entry *_devfs_get_root_entry(void)
{
struct devfs_entry *new;
- static spinlock_t root_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(root_lock);
if (root_entry)
return root_entry;
__setup("devfs=", devfs_setup);
-EXPORT_SYMBOL(devfs_mk_symlink);
EXPORT_SYMBOL(devfs_mk_dir);
EXPORT_SYMBOL(devfs_remove);
static struct inode_operations devfs_iops;
static struct inode_operations devfs_dir_iops;
-static struct file_operations devfs_fops;
-static struct file_operations devfs_dir_fops;
+static const struct file_operations devfs_fops;
+static const struct file_operations devfs_dir_fops;
static struct inode_operations devfs_symlink_iops;
static int devfs_notify_change(struct dentry *dentry, struct iattr *iattr)
return err;
} /* End Function devfs_open */
-static struct file_operations devfs_fops = {
+static const struct file_operations devfs_fops = {
.open = devfs_open,
};
-static struct file_operations devfs_dir_fops = {
+static const struct file_operations devfs_dir_fops = {
.read = generic_read_dir,
.readdir = devfs_readdir,
};
*
* make sure that
* d_instantiate always runs under lock
- * we release i_sem lock before going to sleep
+ * we release i_mutex lock before going to sleep
*
* unfortunately sometimes d_revalidate is called with
- * and sometimes without i_sem lock held. The following checks
+ * and sometimes without i_mutex lock held. The following checks
* attempt to deduce when we need to add (and drop resp.) lock
* here. This relies on current (2.6.2) calling conventions:
*
- * lookup_hash is always run under i_sem and is passing NULL
+ * lookup_hash is always run under i_mutex and is passing NULL
* as nd
*
- * open(...,O_CREATE,...) calls _lookup_hash under i_sem
+ * open(...,O_CREAT,...) calls _lookup_hash under i_mutex
* and sets flags to LOOKUP_OPEN|LOOKUP_CREATE
*
* all other invocations of ->d_revalidate seem to happen
- * outside of i_sem
+ * outside of i_mutex
*/
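+ /* Derived from the conventions above: the callers that already hold
+  * i_mutex are lookup_hash (nd == NULL) and the O_CREAT open path
+  * (LOOKUP_CREATE set without LOOKUP_PARENT); everyone else requires
+  * us to take the lock here ourselves. */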
need_lock = nd &&
(!(nd->flags & LOOKUP_CREATE) || (nd->flags & LOOKUP_PARENT));
if (need_lock)
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
if (is_devfsd_or_child(fs_info)) {
devfs_handle_t de = lookup_info->de;
add_wait_queue(&lookup_info->wait_queue, &wait);
read_unlock(&parent->u.dir.lock);
/* at this point it is always (hopefully) locked */
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
schedule();
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
/*
* This does not need nor should remove wait from wait_queue.
* Wait queue head is never reused - nothing is ever added to it
out:
if (need_lock)
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
return 1;
} /* End Function devfs_d_revalidate_wait */
- /* Unlock directory semaphore, which will release any waiters. They
+ /* Unlock directory mutex, which will release any waiters. They
will get the hashed dentry, and may be forced to wait for
revalidation */
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
wait_for_devfsd_finished(fs_info); /* If I'm not devfsd, must wait */
- down(&dir->i_sem); /* Grab it again because them's the rules */
+ mutex_lock(&dir->i_mutex); /* Grab it again because them's the rules */
de = lookup_info.de;
/* If someone else has been so kind as to make the inode, we go home
early */
return 0;
} /* End Function devfs_mknod */
-static int devfs_readlink(struct dentry *dentry, char *buffer, int buflen)
+static void *devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- int err;
- struct devfs_entry *de;
-
- de = get_devfs_entry_from_vfs_inode(dentry->d_inode);
- if (!de)
- return -ENODEV;
- err = vfs_readlink(dentry, buffer, buflen, de->u.symlink.linkname);
- return err;
-} /* End Function devfs_readlink */
-
-static int devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
- int err;
- struct devfs_entry *de;
-
- de = get_devfs_entry_from_vfs_inode(dentry->d_inode);
- if (!de)
- return -ENODEV;
- err = vfs_follow_link(nd, de->u.symlink.linkname);
- return err;
+ struct devfs_entry *p = get_devfs_entry_from_vfs_inode(dentry->d_inode);
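+ /* nd_set_link() stashes the target string (or an ERR_PTR) in the
+  * nameidata for the generic link-walking code; returning NULL means
+  * there is no cookie for ->put_link() to release. */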
+ nd_set_link(nd, p ? p->u.symlink.linkname : ERR_PTR(-ENODEV));
+ return NULL;
} /* End Function devfs_follow_link */
static struct inode_operations devfs_iops = {
};
static struct inode_operations devfs_symlink_iops = {
- .readlink = devfs_readlink,
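+ /* generic_readlink() obtains the target via ->follow_link() and
+  * nd_get_link(), so the private devfs_readlink() helper above is no
+  * longer needed. */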
+ .readlink = generic_readlink,
.follow_link = devfs_follow_link,
.setattr = devfs_notify_change,
};
sb->s_blocksize_bits = 10;
sb->s_magic = DEVFS_SUPER_MAGIC;
sb->s_op = &devfs_sops;
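+ /* report inode timestamps with 1 ns granularity */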
+ sb->s_time_gran = 1;
if ((root_inode = _devfs_get_vfs_inode(sb, root_entry, NULL)) == NULL)
goto out_no_root;
sb->s_root = d_alloc_root(root_inode);
struct devfsd_notify_struct *info = fs_info->devfsd_info;
DECLARE_WAITQUEUE(wait, current);
- /* Can't seek (pread) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
/* Verify the task has grabbed the queue */
if (fs_info->devfsd_task != current)
return -EPERM;
work even if the global kernel lock were to be removed, because it
doesn't matter who gets in first, as long as only one gets it */
if (fs_info->devfsd_task == NULL) {
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(lock);
if (!spin_trylock(&lock))
return -EBUSY;
entry = fs_info->devfsd_first_event;
fs_info->devfsd_first_event = NULL;
fs_info->devfsd_last_event = NULL;
- if (fs_info->devfsd_info) {
- kfree(fs_info->devfsd_info);
- fs_info->devfsd_info = NULL;
- }
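+ /* kfree(NULL) is a no-op, so the pointer need not be checked first */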
+ kfree(fs_info->devfsd_info);
+ fs_info->devfsd_info = NULL;
spin_unlock(&fs_info->devfsd_buffer_lock);
fs_info->devfsd_pgrp = 0;
fs_info->devfsd_task = NULL;
num = sprintf(txt, "Number of entries: %u number of bytes: %u\n",
stat_num_entries, stat_num_bytes) + 1;
- /* Can't seek (pread) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (*ppos >= num)
return 0;
if (*ppos + len > num)