-static int run_acl(unsigned long ino) {
- struct hlist_node *n;
- struct acl_entry *entry;
- hlist_for_each_entry_rcu(entry,
- n, &procprotect_hash[ino & (HASH_SIZE-1)],
- hlist) {
- if (entry->ino==ino) {
- return 0;
- }
- }
- return 1;
+/* The entry hook ensures that the return hook is only called for
+   accesses to /proc */
+
+/* One-shot latch so the diagnostic dump_stack() below fires at most once. */
+int printed=0;
+
+/*
+ * Return-probe handler for lookup_fast().  ri->data carries the
+ * procprotect_ctx captured by the matching entry handler; regs holds
+ * the register state at function return, so regs->ax is the probed
+ * function's return value.
+ *
+ * If lookup_fast() succeeded (ret == 0) we inspect the resolved inode:
+ * when it is listed in the ACL hash (run_acl() == 0) and the current
+ * task is outside the initial mount namespace, the saved return value
+ * is rewritten to -EPERM so the caller sees the lookup as denied.
+ *
+ * Always returns 0 (kretprobe handler convention).
+ */
+static int lookup_fast_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+	struct procprotect_ctx *ctx = (struct procprotect_ctx *) ri->data;
+	int ret = regs->ax;
+
+	if (ret==0) {
+		/* The kernel is going to honor the request. Here's where we step in */
+		struct inode *inode = *(ctx->inode);
+		/* i_ino is unsigned long: %lu (not %u), and terminate with \n
+		   so the message does not merge with the next printk. */
+		printk(KERN_CRIT "Inode=%lu\n", inode->i_ino);
+		if (!run_acl(inode->i_ino)) {
+			if (current->nsproxy->mnt_ns!=init_task.nsproxy->mnt_ns) {
+				regs->ax = -EPERM;
+			}
+		}
+	}
+	else if (ret==1) {
+		/* lookup_fast() deferred to the slow path; dump the call chain
+		   once for debugging. */
+		if (!printed) {
+			dump_stack();
+			printed=1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Entry handler for a kretprobe on lookup_slow().  Reads the probed
+ * function's arguments from the x86-64 SysV argument registers:
+ *   di = struct nameidata *, si = struct qstr *, dx = struct path *.
+ * NOTE(review): register layout assumes x86-64 — confirm the probed
+ * kernel's lookup_slow() signature matches this argument order.
+ *
+ * Returns 0 (arming the return handler, with flags/path stashed in
+ * ri->data) only when the parent inode lives on procfs AND the current
+ * task is outside the initial mount namespace; returns -1 otherwise so
+ * the return handler is skipped for this call.
+ */
+static int lookup_slow_entry(struct kretprobe_instance *ri, struct pt_regs *regs) {
+ int ret = -1;
+ struct procprotect_ctx *ctx;
+ struct nameidata *nd = (struct nameidata *) regs->di;
+ struct qstr *q = (struct qstr *) regs->si;
+ struct path *p = (struct path *) regs->dx;
+
+ struct dentry *parent = nd->path.dentry;
+ struct inode *pinode = parent->d_inode;
+
+ /* NOTE(review): format string lacks a trailing \n — consecutive
+    printk messages may run together on one line. */
+ printk(KERN_CRIT "Entered lookup slow");
+ if (pinode->i_sb->s_magic == PROC_SUPER_MAGIC
+ && current->nsproxy->mnt_ns!=init_task.nsproxy->mnt_ns) {
+ /* Target is under /proc from a non-init namespace: capture context
+    for the return handler. */
+ ctx = (struct procprotect_ctx *) ri->data;
+ ctx->flags = nd->flags;
+ ctx->path = p;
+ ret = 0;
+ }
+
+ return ret;