printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
print_symbol("%s", regs->eip);
printk(" SS:ESP %04x:%08lx\n", ss, esp);
- } else
+ }
+ else
regs = NULL;
} else
printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
-
#include "asm/unistd.h"
#include "asm/mman.h"
#include "asm/segment.h"
{
int rval;
+ if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
+ goto out;
+
switch (request) {
unsigned long val, copied;
break;
rval = -EIO;
goto out;
- if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
- goto out_tsk;
/* Read/write the word at location ADDR in the registers. */
case PTRACE_PEEKUSR:
spin_unlock(&dq_list_lock);
}
-
int vfs_quota_sync(struct super_block *sb, int type)
{
struct list_head *dirty;
#ifdef __DQUOT_PARANOIA
BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
#endif
+
return dquot;
}
if (error)
return error;
}
-
error = inode_setattr(inode, iattr);
if (!error && (iattr->ia_valid & ATTR_MODE))
error = ext2_acl_chmod(inode);
#define JFS_APPEND_FL 0x01000000 /* writes to file may only append */
#define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */
-#define JFS_BARRIER_FL 0x04000000 /* Barrier for chroot() */
-#define JFS_IUNLINK_FL 0x08000000 /* Immutable unlink */
+#define JFS_BARRIER_FL 0x04000000 /* Barrier for chroot() */
+#define JFS_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define JFS_FL_USER_VISIBLE 0x0FF80000
#define JFS_FL_USER_MODIFIABLE 0x03F80000
#define JFS_IOC_GETFLAGS _IOR('f', 1, long)
#define JFS_IOC_SETFLAGS _IOW('f', 2, long)
+
#endif /*_H_JFS_DINODE */
static int dtSplitUp(tid_t tid,
struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
- struct super_block *sb = ip->i_sb;
- struct jfs_sb_info *sbi = JFS_SBI(sb);
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
int rc = 0;
struct metapage *smp;
dtpage_t *sp; /* split page */
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
goto clean_up;
}
fl->fl_type = type;
fl->fl_end = OFFSET_MAX;
- vxd_assert(filp->f_xid == vx_current_xid(),
- "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
fl->fl_xid = filp->f_xid;
vx_locks_inc(fl);
goto out;
fl->fl_xid = vx_current_xid();
- if (filp)
- vxd_assert(filp->f_xid == fl->fl_xid,
- "f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid);
vx_locks_inc(fl);
error = lease_init(filp, type, fl);
if (error) {
struct file_lock **before;
int error, added = 0;
- vxd_assert(xid == vx_current_xid(),
- "xid(%d) == current(%d)", xid, vx_current_xid());
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
if (file_lock == NULL)
return -ENOLCK;
- vxd_assert(filp->f_xid == vx_current_xid(),
- "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
file_lock->fl_xid = filp->f_xid;
vx_locks_inc(file_lock);
if (file_lock == NULL)
return -ENOLCK;
- vxd_assert(filp->f_xid == vx_current_xid(),
- "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
file_lock->fl_xid = filp->f_xid;
vx_locks_inc(file_lock);
return -EACCES;
}
-static inline int xid_permission(struct inode *inode, int mask, struct nameidata *nd)
+/* vx_barrier: test whether @inode carries the chroot() barrier flag.
+ * Returns 1 (after logging a warning) when a context without VX_ADMIN
+ * hits the barrier, 0 otherwise.  Factored out of xid_permission() so
+ * the check can also be applied in the generic permission path. */
+static inline int vx_barrier(struct inode *inode)
{
if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN)) {
vxwprintk(1, "xid=%d did hit the barrier.",
vx_current_xid());
- return -EACCES;
+ return 1;
}
+ return 0;
+}
+
+static inline int xid_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+ if (vx_barrier(inode))
+ return -EACCES;
if (inode->i_xid == 0)
return 0;
if (vx_check(inode->i_xid, VX_ADMIN|VX_WATCH|VX_IDENT))
{
umode_t mode = inode->i_mode;
+ if (vx_barrier(inode))
+ return -EACCES;
if (inode->i_op && inode->i_op->permission)
return -EAGAIN;
return __vfs_follow_link(nd, link);
}
-
/* get the link contents into pagecache */
static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
struct vfsmount *mnt = v;
int err = 0;
- /* device */
- if (mnt->mnt_devname) {
- seq_puts(m, "device ");
- mangle(m, mnt->mnt_devname);
- } else
- seq_puts(m, "no device");
-
- /* mount point */
- seq_puts(m, " mounted on ");
- seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
- seq_putc(m, ' ');
+ if (vx_flags(VXF_HIDE_MOUNT, 0))
+ return 0;
+ if (!mnt_is_reachable(mnt) && !vx_check(0, VX_WATCH))
+ return 0;
+
+ if (!vx_check(0, VX_ADMIN|VX_WATCH) &&
+ mnt == current->fs->rootmnt) {
+ seq_puts(m, "device /dev/root mounted on / ");
+ } else {
+ /* device */
+ if (mnt->mnt_devname) {
+ seq_puts(m, "device ");
+ mangle(m, mnt->mnt_devname);
+ } else
+ seq_puts(m, "no device");
+
+ /* mount point */
+ seq_puts(m, " mounted on ");
+ seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
+ seq_putc(m, ' ');
+ }
/* file system type */
seq_puts(m, "with fstype ");
}
}
-static inline void __umount_list(struct vfsmount *mnt,
- int propagate, struct list_head *kill)
+/* umount_tree: move every mount in the tree rooted at @mnt onto the
+ * @kill list (re-linked via mnt_hash) and, when @propagate is set, let
+ * propagate_umount() extend the umount to propagation peers.
+ * NOTE(review): callers presumably hold the vfsmount lock -- confirm
+ * against the call sites; the locking is not visible in this hunk. */
+void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
struct vfsmount *p;
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+ list_del(&p->mnt_hash);
+ list_add(&p->mnt_hash, kill);
+ }
+
if (propagate)
propagate_umount(kill);
}
}
-void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
-{
- struct vfsmount *p;
-
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- list_del(&p->mnt_hash);
- list_add(&p->mnt_hash, kill);
- // p->mnt_namespace = NULL;
- }
- __umount_list(mnt, propagate, kill);
-}
-
-void umount_unused(struct vfsmount *mnt, struct fs_struct *fs)
-{
- struct vfsmount *p;
- LIST_HEAD(kill);
-
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- if (p == fs->rootmnt || p == fs->pwdmnt)
- continue;
- list_del(&p->mnt_list);
- list_add(&p->mnt_list, &kill);
- p->mnt_namespace = NULL;
- }
- __umount_list(mnt, 0, &kill);
-}
-
static int do_umount(struct vfsmount *mnt, int flags)
{
struct super_block *sb = mnt->mnt_sb;
if (flags & MS_NODIRATIME)
mnt_flags |= MNT_NODIRATIME;
- if (vx_ccaps(VXC_SECURE_MOUNT))
+ if (!capable(CAP_SYS_ADMIN))
mnt_flags |= MNT_NODEV;
flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
MS_NOATIME | MS_NODIRATIME);
OCFS2_LOCK_TYPE_DATA, inode);
status = 0;
-
bail:
mlog_exit(status);
return status;
if (quota_ret)
hint->preallocate = hint->prealloc_size = 0;
}
-
/* for unformatted nodes, force large allocations */
bigalloc = amount_needed;
}
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE ((TASK_UNMAPPED_BASE) * 2)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
now struct_user_regs, they are different) */
extern int copy_namespace(int, struct task_struct *);
extern void __put_namespace(struct namespace *namespace);
extern struct namespace *dup_namespace(struct task_struct *, struct fs_struct *);
-extern void umount_unused(struct vfsmount *, struct fs_struct *);
static inline void put_namespace(struct namespace *namespace)
{
#define VCMD_enter_namespace VC_CMD(PROCALT, 1, 0)
-#define VCMD_cleanup_namespace VC_CMD(PROCALT, 2, 0)
#define VCMD_set_namespace_v0 VC_CMD(PROCALT, 3, 0)
#define VCMD_set_namespace VC_CMD(PROCALT, 3, 1)
#ifdef __KERNEL__
extern int vc_enter_namespace(uint32_t, void __user *);
-extern int vc_cleanup_namespace(uint32_t, void __user *);
extern int vc_set_namespace(uint32_t, void __user *);
#endif /* __KERNEL__ */
#define NXA_MOD_BCAST (1<<8)
-#define NXA_TYPE_ANY (~0)
+#define NXA_TYPE_ANY ((uint16_t)-1)
#ifdef __KERNEL__
/* interface version */
-#define VCI_VERSION 0x00020001
+#define VCI_VERSION 0x00020002
#define VCI_LEGACY_VERSION 0x000100FF
/* query version */
unsigned int skc_hash;
struct proto *skc_prot;
xid_t skc_xid;
- struct vx_info *skc_vx_info;
+ struct vx_info *skc_vx_info;
nid_t skc_nid;
- struct nx_info *skc_nx_info;
+ struct nx_info *skc_nx_info;
};
/**
return __capable(current, cap);
}
EXPORT_SYMBOL(capable);
-
ptrace_unlink(current);
/* Reparent to init */
remove_parent(current);
- /* FIXME: handle vchild_reaper/initpid */
current->parent = child_reaper;
current->real_parent = child_reaper;
add_parent(current);
}
i++;
set >>= 1;
+ cond_resched();
}
}
}
}
list_for_each_safe(_p, _n, &father->ptrace_children) {
p = list_entry(_p,struct task_struct,ptrace_list);
- /* check for reaper context */
choose_new_parent(p, reaper);
reparent_thread(p, father, 1);
}
if (!pid)
return -EAGAIN;
-
nr = pid->nr;
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
struct task_struct *leader;
- int ret;
+ int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
+ timr->it_process);
- ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
- timr->it_process);
if (likely(ret >= 0))
return ret;
*ppid = sys_getppid();
return sys_getpid();
}
-
+
#else /* _alpha_ */
asmlinkage long sys_getuid(void)
"vx_set_init(%p[#%d],%p[#%d,%d,%d])",
vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
+ vxi->vx_flags &= ~VXF_STATE_INIT;
vxi->vx_initpid = p->tgid;
return 0;
}
vxdprintk(VXD_CBIT(xid, 6),
"vx_set_persistent(%p[#%d])", vxi, vxi->vx_id);
- if (vx_info_flags(vxi, VXF_PERSISTENT, 0)) {
- get_vx_info(vxi);
- claim_vx_info(vxi, current);
- } else {
- release_vx_info(vxi, current);
- put_vx_info(vxi);
- }
+ get_vx_info(vxi);
+ claim_vx_info(vxi, current);
+}
+
+/* vx_clear_persistent: drop the persistence reference taken by
+ * vx_set_persistent() and release the claim held on behalf of
+ * current.  Counterpart of vx_set_persistent(). */
+void vx_clear_persistent(struct vx_info *vxi)
+{
+ vxdprintk(VXD_CBIT(xid, 6),
+ "vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id);
+
+ release_vx_info(vxi, current);
+ put_vx_info(vxi);
+}
+
+/* vx_update_persistent: (re)sync the persistence reference with the
+ * current state of VXF_PERSISTENT in @vxi's flags. */
+void vx_update_persistent(struct vx_info *vxi)
+{
+ if (vx_info_flags(vxi, VXF_PERSISTENT, 0))
+ vx_set_persistent(vxi);
+ else
+ vx_clear_persistent(vxi);
+}
if ((vc_data.flagword & VXF_PERSISTENT))
vx_set_persistent(new_vxi);
- vs_state_change(new_vxi, VSC_STARTUP);
- ret = new_vxi->vx_id;
- vx_migrate_task(current, new_vxi);
- /* if this fails, we might end up with a hashed vx_info */
+ ret = -ENOEXEC;
+ if (vs_state_change(new_vxi, VSC_STARTUP))
+ goto out_unhash;
+ ret = vx_migrate_task(current, new_vxi);
+ if (!ret) {
+ /* return context id on success */
+ ret = new_vxi->vx_id;
+ goto out;
+ }
+out_unhash:
+ /* prepare for context disposal */
+ new_vxi->vx_state |= VXS_SHUTDOWN;
+ if ((vc_data.flagword & VXF_PERSISTENT))
+ vx_clear_persistent(new_vxi);
+ __unhash_vx_info(new_vxi);
+out:
put_vx_info(new_vxi);
return ret;
}
vxi->vx_flags = vx_mask_flags(vxi->vx_flags,
vc_data.flagword, mask);
if (trigger & VXF_PERSISTENT)
- vx_set_persistent(vxi);
+ vx_update_persistent(vxi);
put_vx_info(vxi);
return 0;
uint32_t now, last, delta;
unsigned int nr_running, nr_uninterruptible;
unsigned int total;
+ unsigned long flags;
- spin_lock(&vxi->cvirt.load_lock);
+ spin_lock_irqsave(&vxi->cvirt.load_lock, flags);
now = jiffies;
last = vxi->cvirt.load_last;
vxi->cvirt.load_last = now;
out:
atomic_inc(&vxi->cvirt.load_updates);
- spin_unlock(&vxi->cvirt.load_lock);
+ spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags);
}
struct dl_info *dli;
hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
-// hlist_for_each_rcu(pos, head) {
-// struct dl_info *dli =
-// hlist_entry(pos, struct dl_info, dl_hlist);
if (dli->dl_xid == xid && dli->dl_sb == sb) {
return dli;
init = find_task_by_real_pid(pid);
if (!init)
return -ESRCH;
-
- vxi->vx_flags &= ~VXF_STATE_INIT;
return vx_set_init(vxi, init);
}
#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <linux/vserver/legacy.h>
-// #include <linux/vserver/namespace.h>
#include <linux/namespace.h>
#include <linux/err.h>
return ret;
}
-int vc_cleanup_namespace(uint32_t id, void __user *data)
-{
- // down_write(¤t->namespace->sem);
- spin_lock(&vfsmount_lock);
- umount_unused(current->namespace->root, current->fs);
- spin_unlock(&vfsmount_lock);
- // up_write(¤t->namespace->sem);
- return 0;
-}
-
int vc_set_namespace(uint32_t id, void __user *data)
{
struct fs_struct *fs;
void nx_set_persistent(struct nx_info *nxi)
{
- if (nx_info_flags(nxi, NXF_PERSISTENT, 0)) {
- get_nx_info(nxi);
- claim_nx_info(nxi, current);
- } else {
- release_nx_info(nxi, current);
- put_nx_info(nxi);
- }
+ /* unconditionally take a reference and claim @nxi for current;
+ * the NXF_PERSISTENT flag test now lives in nx_update_persistent() */
+ get_nx_info(nxi);
+ claim_nx_info(nxi, current);
+}
+
+/* nx_clear_persistent: drop the persistence reference taken by
+ * nx_set_persistent() and release the claim held on behalf of
+ * current.  Counterpart of nx_set_persistent(). */
+void nx_clear_persistent(struct nx_info *nxi)
+{
+ vxdprintk(VXD_CBIT(nid, 6),
+ "nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+ release_nx_info(nxi, current);
+ put_nx_info(nxi);
+}
+
+/* nx_update_persistent: (re)sync the persistence reference with the
+ * current state of NXF_PERSISTENT in @nxi's flags. */
+void nx_update_persistent(struct nx_info *nxi)
+{
+ if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
+ nx_set_persistent(nxi);
+ else
+ nx_clear_persistent(nxi);
+}
/* vserver syscall commands below here */
if ((vc_data.flagword & NXF_PERSISTENT))
nx_set_persistent(new_nxi);
- vs_net_change(new_nxi, VSC_NETUP);
- ret = new_nxi->nx_id;
- nx_migrate_task(current, new_nxi);
- /* if this fails, we might end up with a hashed nx_info */
+ ret = -ENOEXEC;
+ if (vs_net_change(new_nxi, VSC_NETUP))
+ goto out_unhash;
+ ret = nx_migrate_task(current, new_nxi);
+ if (!ret) {
+ /* return context id on success */
+ ret = new_nxi->nx_id;
+ goto out;
+ }
+out_unhash:
+ /* prepare for context disposal */
+ new_nxi->nx_state |= NXS_SHUTDOWN;
+ if ((vc_data.flagword & NXF_PERSISTENT))
+ nx_clear_persistent(new_nxi);
+ __unhash_nx_info(new_nxi);
+out:
put_nx_info(new_nxi);
return ret;
}
if (!nxi)
return -ESRCH;
- switch ((unsigned)vc_data.type) {
+ switch (vc_data.type) {
case NXA_TYPE_ANY:
nxi->nbipv4 = 0;
break;
nxi->nx_flags = vx_mask_flags(nxi->nx_flags,
vc_data.flagword, mask);
if (trigger & NXF_PERSISTENT)
- nx_set_persistent(nxi);
+ nx_update_persistent(nxi);
put_nx_info(nxi);
return 0;
return vc_set_namespace(-1, data);
case VCMD_set_namespace:
return vc_set_namespace(id, data);
- case VCMD_cleanup_namespace:
- return vc_cleanup_namespace(id, data);
}
/* those are allowed while in setup too */
set_page_dirty(page);
page_remove_rmap(page);
page_cache_release(page);
- // dec_mm_counter(mm, file_rss);
}
} else {
if (!pte_file(pte))
write_unlock_bh(&udp_hash_lock);
}
-
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/