#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
+#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
/* Run time adjustable parameters. */
-__u32 sysctl_wmem_max = SK_WMEM_MAX;
-__u32 sysctl_rmem_max = SK_RMEM_MAX;
-__u32 sysctl_wmem_default = SK_WMEM_MAX;
-__u32 sysctl_rmem_default = SK_RMEM_MAX;
+__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
/* Maximal space eaten by iovec or ancillary data plus some space */
-int sysctl_optmem_max = sizeof(unsigned long)*(2*UIO_MAXIOV + 512);
+int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
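/*
 * Illustrative note, not part of this patch: __read_mostly is assumed to
 * move these rarely-written sysctls into a separate data section so they
 * do not share cache lines with frequently-written fields; on x86 of this
 * era it is roughly:
 *
 *	#define __read_mostly __attribute__((__section__(".data.read_mostly")))
 */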
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
goto out;
}
- /* It would be deadlock, if sock_queue_rcv_skb is used
- with socket lock! We assume that users of this
- function are lock free.
- */
- err = sk_filter(sk, skb, 1);
+ err = sk_filter(sk, skb);
if (err)
goto out;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
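/*
 * Illustrative sketch, not part of this patch: the third "needlock"
 * argument disappears from the sk_filter() callers above because the
 * helper is assumed to take its own BH-safe RCU read lock now that
 * sk->sk_filter is RCU-protected, roughly:
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
						     filter->len);
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}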
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
int rc = NET_RX_SUCCESS;
- if (sk_filter(sk, skb, 0))
+ if (sk_filter(sk, skb))
goto discard_and_relse;
skb->dev = NULL;
- bh_lock_sock(sk);
+ if (nested)
+ bh_lock_sock_nested(sk);
+ else
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
/*
* trylock + unlock semantics:
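/*
 * Illustrative sketch, not part of this patch: bh_lock_sock_nested() is
 * assumed to be the lockdep-aware variant that takes the slock with
 * SINGLE_DEPTH_NESTING, so a socket delivering into a second socket (the
 * nested == 1 case above) is not reported as lock recursion:
 *
 *	#define bh_lock_sock_nested(__sk) \
 *		spin_lock_nested(&((__sk)->sk_lock.slock), SINGLE_DEPTH_NESTING)
 */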
break;
case SO_DETACH_FILTER:
- spin_lock_bh(&sk->sk_lock.slock);
- filter = sk->sk_filter;
+ rcu_read_lock_bh();
+ filter = rcu_dereference(sk->sk_filter);
if (filter) {
- sk->sk_filter = NULL;
- spin_unlock_bh(&sk->sk_lock.slock);
+ rcu_assign_pointer(sk->sk_filter, NULL);
sk_filter_release(sk, filter);
+ rcu_read_unlock_bh();
break;
}
- spin_unlock_bh(&sk->sk_lock.slock);
+ rcu_read_unlock_bh();
ret = -ENONET;
break;
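/*
 * Illustrative sketch, not part of this patch: the attach side
 * (SO_ATTACH_FILTER -> sk_attach_filter()) is assumed to publish a new
 * program symmetrically, swapping the pointer under the BH read lock and
 * releasing the old filter afterwards:
 */
	rcu_read_lock_bh();
	old_fp = rcu_dereference(sk->sk_filter);
	rcu_assign_pointer(sk->sk_filter, fp);
	rcu_read_unlock_bh();
	if (old_fp)
		sk_filter_release(sk, old_fp);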
struct proto *prot, int zero_it)
{
struct sock *sk = NULL;
- kmem_cache_t *slab = prot->slab;
+ struct kmem_cache *slab = prot->slab;
if (slab != NULL)
sk = kmem_cache_alloc(slab, priority);
if (sk->sk_destruct)
sk->sk_destruct(sk);
- filter = sk->sk_filter;
+ filter = rcu_dereference(sk->sk_filter);
if (filter) {
sk_filter_release(sk, filter);
- sk->sk_filter = NULL;
+ rcu_assign_pointer(sk->sk_filter, NULL);
}
sock_disable_timestamp(sk);
module_put(owner);
}
-struct sock *sk_clone(struct sock *sk, const gfp_t priority)
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
if (newsk != NULL) {
struct sk_filter *filter;
- memcpy(newsk, sk, sk->sk_prot->obj_size);
+ sock_copy(newsk, sk);
/* SANITY */
sock_vx_init(newsk);
if (filter != NULL)
sk_filter_charge(newsk, filter);
- if (sk->sk_create_child)
- sk->sk_create_child(sk, newsk);
-
if (unlikely(xfrm_sk_clone_policy(newsk))) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
atomic_set(&sk->sk_refcnt, 1);
}
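/*
 * Illustrative sketch, not part of this patch: sock_copy() is assumed to
 * wrap the old memcpy() while preserving the new socket's LSM security
 * blob, roughly:
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, osk->sk_prot->obj_size);
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}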
-void fastcall lock_sock(struct sock *sk)
+void fastcall lock_sock_nested(struct sock *sk, int subclass)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
/*
* The sk_lock has mutex_lock() semantics here:
*/
- mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
local_bh_enable();
}
-EXPORT_SYMBOL(lock_sock);
+EXPORT_SYMBOL(lock_sock_nested);
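/*
 * Illustrative sketch, not part of this patch: existing lock_sock()
 * callers are assumed to keep working through a wrapper in
 * include/net/sock.h that passes lockdep subclass 0:
 */
static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}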
void fastcall release_sock(struct sock *sk)
{
{
struct sock *sk = sock->sk;
- if (sk->sk_prot->compat_setsockopt != NULL)
+ if (sk->sk_prot->compat_getsockopt != NULL)
return sk->sk_prot->compat_getsockopt(sk, level, optname,
optval, optlen);
return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);