#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
+#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <linux/filter.h>
+#include <linux/vs_socket.h>
#ifdef CONFIG_INET
#include <net/tcp.h>
break;
case SO_PASSCRED:
- sock->passcred = valbool;
+ if (valbool)
+ set_bit(SOCK_PASS_CRED, &sock->flags);
+ else
+ clear_bit(SOCK_PASS_CRED, &sock->flags);
break;
case SO_TIMESTAMP:
struct timeval tm;
} v;
- unsigned int lv=sizeof(int),len;
+ unsigned int lv = sizeof(int);
+ int len;
if(get_user(len,optlen))
return -EFAULT;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000) / HZ;
+ v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
}
break;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_sndtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000) / HZ;
+ v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
}
break;
break;
case SO_PASSCRED:
- v.val = sock->passcred;
+ v.val = test_bit(SOCK_PASS_CRED, &sock->flags)?1:0;
break;
case SO_PEERCRED:
sock_lock_init(sk);
}
sk->sk_slab = slab;
+ sock_vx_init(sk);
+ sock_nx_init(sk);
if (security_sk_alloc(sk, family, priority)) {
kmem_cache_free(slab, sk);
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__FUNCTION__, atomic_read(&sk->sk_omem_alloc));
+ /*
+ * If sendmsg cached page exists, toss it.
+ */
+ if (sk->sk_sndmsg_page) {
+ __free_page(sk->sk_sndmsg_page);
+ sk->sk_sndmsg_page = NULL;
+ }
+
security_sk_free(sk);
+ BUG_ON(sk->sk_vx_info);
+ BUG_ON(sk->sk_nx_info);
kmem_cache_free(sk->sk_slab, sk);
module_put(owner);
}
void __init sk_init(void)
{
sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
- SLAB_HWCACHE_ALIGN, 0, 0);
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!sk_cachep)
printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
+
+int sock_i_uid(struct sock *sk)
+{
+ int uid;
+
+ read_lock(&sk->sk_callback_lock);
+ uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return uid;
+}
+
+unsigned long sock_i_ino(struct sock *sk)
+{
+ unsigned long ino;
+
+ read_lock(&sk->sk_callback_lock);
+ ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return ino;
+}
+
/*
* Allocate a skb from the socket's send buffer.
*/
skb_queue_head_init(&sk->sk_write_queue);
skb_queue_head_init(&sk->sk_error_queue);
+ sk->sk_send_head = NULL;
+
init_timer(&sk->sk_timer);
sk->sk_allocation = GFP_KERNEL;
sk->sk_error_report = sock_def_error_report;
sk->sk_destruct = sock_def_destruct;
+ sk->sk_sndmsg_page = NULL;
+ sk->sk_sndmsg_off = 0;
+
sk->sk_peercred.pid = 0;
sk->sk_peercred.uid = -1;
sk->sk_peercred.gid = -1;
+ sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_stamp.tv_sec = -1L;
sk->sk_stamp.tv_usec = -1L;
+ sk->sk_vx_info = NULL;
+ sk->sk_xid = 0;
+ sk->sk_nx_info = NULL;
+ sk->sk_nid = 0;
+
atomic_set(&sk->sk_refcnt, 1);
}
}
EXPORT_SYMBOL(sock_disable_timestamp);
+/*
+ * Get a socket option on a socket.
+ *
+ * FIX: POSIX 1003.1g is very ambiguous here. It states that
+ * asynchronous errors should be reported by getsockopt. We assume
+ * this means if you specify SO_ERROR (otherwise what's the point of it).
+ */
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_getsockopt);
+
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ int addr_len = 0;
+ int err;
+
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
+ if (err >= 0)
+ msg->msg_namelen = addr_len;
+ return err;
+}
+
+EXPORT_SYMBOL(sock_common_recvmsg);
+
+/*
+ * Set socket options on a socket.
+ */
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_setsockopt);
+
+void sk_common_release(struct sock *sk)
+{
+ if (sk->sk_prot->destroy)
+ sk->sk_prot->destroy(sk);
+
+ /*
+ * Observation: when sk_common_release is called, processes have
+ * no access to socket. But net still has.
+ * Step one, detach it from networking:
+ *
+ * A. Remove from hash tables.
+ */
+
+ sk->sk_prot->unhash(sk);
+
+ /*
+ * In this point socket cannot receive new packets, but it is possible
+ * that some packets are in flight because some CPU runs receiver and
+ * did hash table lookup before we unhashed socket. They will reach the
+ * receive queue and will be purged by the socket destructor.
+ *
+ * Also we still have packets pending on receive queue and probably,
+ * our own packets waiting in device queues. sock_destroy will drain
+ * receive queue, but transmitted packets will delay socket destruction
+ * until the last reference will be released.
+ */
+
+ sock_orphan(sk);
+
+ xfrm_sk_free_policy(sk);
+
+#ifdef INET_REFCNT_DEBUG
+ if (atomic_read(&sk->sk_refcnt) != 1)
+ printk(KERN_DEBUG "Destruction of the socket %p delayed, c=%d\n",
+ sk, atomic_read(&sk->sk_refcnt));
+#endif
+ sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_common_release);
+
EXPORT_SYMBOL(__lock_sock);
EXPORT_SYMBOL(__release_sock);
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
+EXPORT_SYMBOL(sock_i_uid);
+EXPORT_SYMBOL(sock_i_ino);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_optmem_max);
EXPORT_SYMBOL(sysctl_rmem_max);