#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
-#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <linux/filter.h>
clear_bit(SOCK_PASS_CRED, &sock->flags);
break;
- case SO_SETXID:
- if (current->xid) {
- ret = -EPERM;
- break;
- }
- if (val < 0 || val > MAX_S_CONTEXT) {
- ret = -EINVAL;
- break;
- }
- sk->sk_xid = val;
- break;
-
case SO_TIMESTAMP:
sk->sk_rcvtstamp = valbool;
if (valbool)
struct timeval tm;
} v;
- unsigned int lv = sizeof(int);
- int len;
+ unsigned int lv=sizeof(int),len;
if(get_user(len,optlen))
return -EFAULT;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
+ v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000) / HZ;
}
break;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_sndtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
+ v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000) / HZ;
}
break;
security_sk_free(sk);
BUG_ON(sk->sk_vx_info);
BUG_ON(sk->sk_nx_info);
+/* clr_vx_info(&sk->sk_vx_info);
+ clr_nx_info(&sk->sk_nx_info); */
kmem_cache_free(sk->sk_slab, sk);
module_put(owner);
}
void __init sk_init(void)
{
sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, 0, 0);
if (!sk_cachep)
printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
-
-/* NOTE(review): this hunk deletes sock_i_uid()/sock_i_ino(), which
- * report the owning inode's uid / inode number under sk_callback_lock
- * (returning 0 for an orphaned sock with no sk_socket).  Their
- * EXPORT_SYMBOLs are removed in a later hunk -- verify the target tree
- * has no remaining in-tree callers before applying. */
-int sock_i_uid(struct sock *sk)
-{
- int uid;
-
- read_lock(&sk->sk_callback_lock);
- uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
- read_unlock(&sk->sk_callback_lock);
- return uid;
-}
-
-unsigned long sock_i_ino(struct sock *sk)
-{
- unsigned long ino;
-
- read_lock(&sk->sk_callback_lock);
- ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
- read_unlock(&sk->sk_callback_lock);
- return ino;
-}
-
/*
* Allocate a skb from the socket's send buffer.
*/
/*
 * sock_no_sendpage() -- generic ->sendpage() fallback: kmap() the page
 * and feed the bytes through the socket's ordinary sendmsg path.
 *
 * NOTE(review): this hunk replaces the kernel_sendmsg()/struct kvec
 * form with the older set_fs(KERNEL_DS) + sock_sendmsg() idiom, so the
 * target tree presumably predates kernel_sendmsg() -- confirm.  The
 * address limit is restored immediately after the single
 * sock_sendmsg() call, so no path leaks KERNEL_DS; kunmap() is reached
 * on success and failure alike.
 */
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
ssize_t res;
- struct msghdr msg = {.msg_flags = flags};
- struct kvec iov;
- char *kaddr = kmap(page);
- iov.iov_base = kaddr + offset;
+ struct msghdr msg;
+ struct iovec iov;
+ mm_segment_t old_fs;
+ char *kaddr;
+
+ kaddr = kmap(page);
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = flags;
+
+ /* This cast is ok because of the "set_fs(KERNEL_DS)" */
+ iov.iov_base = (void __user *) (kaddr + offset);
iov.iov_len = size;
- res = kernel_sendmsg(sock, &msg, &iov, 1, size);
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ res = sock_sendmsg(sock, &msg, size);
+ set_fs(old_fs);
+
kunmap(page);
return res;
}
skb_queue_head_init(&sk->sk_write_queue);
skb_queue_head_init(&sk->sk_error_queue);
- sk->sk_send_head = NULL;
-
init_timer(&sk->sk_timer);
sk->sk_allocation = GFP_KERNEL;
sk->sk_error_report = sock_def_error_report;
sk->sk_destruct = sock_def_destruct;
- sk->sk_sndmsg_page = NULL;
- sk->sk_sndmsg_off = 0;
-
sk->sk_peercred.pid = 0;
sk->sk_peercred.uid = -1;
sk->sk_peercred.gid = -1;
- sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
}
EXPORT_SYMBOL(sock_disable_timestamp);
-/*
- * Get a socket option on an socket.
- *
- * FIX: POSIX 1003.1g is very ambiguous here. It states that
- * asynchronous errors should be reported by getsockopt. We assume
- * this means if you specify SO_ERROR (otherwise whats the point of it).
- */
-/* NOTE(review): hunk removes this thin dispatch wrapper (forwards to
- * sk->sk_prot->getsockopt) along with its export; make sure no
- * protocol in the target tree still wires its .getsockopt to it. */
-int sock_common_getsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
-
- return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
-}
-
-EXPORT_SYMBOL(sock_common_getsockopt);
-
-/* NOTE(review): removed: generic recvmsg wrapper.  It dispatches to
- * sk->sk_prot->recvmsg (splitting MSG_DONTWAIT out of the flag word)
- * and, on success, copies the reported source-address length back into
- * msg->msg_namelen.  Verify no proto ops still reference it. */
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- int addr_len = 0;
- int err;
-
- err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, &addr_len);
- if (err >= 0)
- msg->msg_namelen = addr_len;
- return err;
-}
-
-EXPORT_SYMBOL(sock_common_recvmsg);
-
-/*
- * Set socket options on an inet socket.
- */
-/* NOTE(review): removed alongside the other sock_common_* helpers;
- * a one-line dispatch to sk->sk_prot->setsockopt. */
-int sock_common_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
-{
- struct sock *sk = sock->sk;
-
- return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
-}
-
-EXPORT_SYMBOL(sock_common_setsockopt);
-
-/* NOTE(review): hunk removes the shared socket-release helper:
- * proto destroy -> unhash -> orphan -> xfrm policy free -> sock_put.
- * Its removal is consistent with this patch also dropping the
- * <net/xfrm.h> include at the top of the file (xfrm_sk_free_policy is
- * only used here).  Confirm every protocol in the target tree has its
- * own release path before applying. */
-void sk_common_release(struct sock *sk)
-{
- if (sk->sk_prot->destroy)
- sk->sk_prot->destroy(sk);
-
- /*
- * Observation: when sock_common_release is called, processes have
- * no access to socket. But net still has.
- * Step one, detach it from networking:
- *
- * A. Remove from hash tables.
- */
-
- sk->sk_prot->unhash(sk);
-
- /*
- * In this point socket cannot receive new packets, but it is possible
- * that some packets are in flight because some CPU runs receiver and
- * did hash table lookup before we unhashed socket. They will achieve
- * receive queue and will be purged by socket destructor.
- *
- * Also we still have packets pending on receive queue and probably,
- * our own packets waiting in device queues. sock_destroy will drain
- * receive queue, but transmitted packets will delay socket destruction
- * until the last reference will be released.
- */
-
- sock_orphan(sk);
-
- xfrm_sk_free_policy(sk);
-
-#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&sk->sk_refcnt) != 1)
- printk(KERN_DEBUG "Destruction of the socket %p delayed, c=%d\n",
- sk, atomic_read(&sk->sk_refcnt));
-#endif
- sock_put(sk);
-}
-
-EXPORT_SYMBOL(sk_common_release);
-
EXPORT_SYMBOL(__lock_sock);
EXPORT_SYMBOL(__release_sock);
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
-EXPORT_SYMBOL(sock_i_uid);
-EXPORT_SYMBOL(sock_i_ino);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_optmem_max);
EXPORT_SYMBOL(sysctl_rmem_max);