*/
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/vs_context.h>
-#include <linux/vs_network.h>
#include <linux/vs_limit.h>
-int sysctl_unix_max_dgram_qlen = 10;
+int sysctl_unix_max_dgram_qlen __read_mostly = 10;
struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
DEFINE_SPINLOCK(unix_table_lock);
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
+#ifdef CONFIG_SECURITY_NETWORK
+static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
+}
+
+static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ scm->secid = *UNIXSID(skb);
+}
+#else
+static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{ }
+
+static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{ }
+#endif /* CONFIG_SECURITY_NETWORK */
+
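The UNIXSID() accessor used above is not part of this hunk; in the include/net/af_unix.h of this era it resolves into the skb control block, roughly as follows (assumed definitions, shown for reference only):

	struct unix_skb_parms {
		struct ucred		creds;	/* skb credentials */
		struct scm_fp_list	*fp;	/* passed file descriptors */
	#ifdef CONFIG_SECURITY_NETWORK
		u32			secid;	/* security ID, rides in skb->cb */
	#endif
	};

	#define UNIXCB(skb)	(*(struct unix_skb_parms *)&((skb)->cb))
	#define UNIXSID(skb)	(&UNIXCB((skb)).secid)

So unix_get_secdata() stamps the sender's security ID into the datagram at send time, and unix_set_secdata() recovers it into the receiver's scm_cookie on delivery.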
/*
* SMP locking strategy:
* hash table is protected with spinlock unix_table_lock
* each socket state is protected by separate rwlock.
*/
-static inline unsigned unix_hash_fold(unsigned hash)
+static inline unsigned unix_hash_fold(__wsum n)
{
+ unsigned hash = (__force unsigned)n;
hash ^= hash>>16;
hash ^= hash>>8;
return hash&(UNIX_HASH_SIZE-1);
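A quick worked example of the fold, assuming UNIX_HASH_SIZE == 256 as in this era:

	/*
	 * n     = 0x12345678
	 * hash ^= hash >> 16;   0x12345678 ^ 0x00001234 = 0x1234444c
	 * hash ^= hash >> 8;    0x1234444c ^ 0x00123444 = 0x12267008
	 * hash &= 255;          = 0x08
	 */

The two XOR steps mix the upper checksum bits down into the low byte before the mask discards everything else.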
sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
struct unix_sock *u = unix_sk(s);
- if (!vx_check(s->sk_xid, VX_IDENT|VX_WATCH))
+ if (!nx_check(s->sk_nid, VS_WATCH_P|VS_IDENT))
continue;
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
.obj_size = sizeof(struct unix_sock),
};
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * don't trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-class by reinitializing the spinlock key:
+ */
+static struct lock_class_key af_unix_sk_receive_queue_lock_key;
+
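For readers unfamiliar with lockdep class keys, this is the generic re-keying pattern (a minimal sketch, not code from this patch; 'obj' and 'my_key' are placeholders):

	static struct lock_class_key my_key;

	spin_lock_init(&obj->lock);		/* joins the default class */
	lockdep_set_class(&obj->lock, &my_key);	/* split into its own class */

All sk_receive_queue locks initialized by sock_init_data() share one lock class; re-keying the AF_UNIX ones stops lockdep from warning about bh-unsafe usage that cannot occur for this family.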
static struct sock * unix_create1(struct socket *sock)
{
struct sock *sk = NULL;
atomic_inc(&unix_nr_socks);
sock_init_data(sock,sk);
+ lockdep_set_class(&sk->sk_receive_queue.lock,
+ &af_unix_sk_receive_queue_lock_key);
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
u->mnt = NULL;
spin_lock_init(&u->lock);
atomic_set(&u->inflight, sock ? 0 : -1);
- init_MUTEX(&u->readsem); /* single task reading lock */
+ mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
unix_insert_socket(unix_sockets_unbound, sk);
out:
struct unix_address * addr;
int err;
- down(&u->readsem);
+ mutex_lock(&u->readlock);
err = 0;
if (u->addr)
goto out;
err = -ENOMEM;
- addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
+ addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
if (!addr)
goto out;
- memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
addr->name->sun_family = AF_UNIX;
atomic_set(&addr->refcnt, 1);
spin_unlock(&unix_table_lock);
err = 0;
-out: up(&u->readsem);
+out: mutex_unlock(&u->readlock);
return err;
}
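The readsem -> readlock hunks here and below are a mechanical semaphore-to-mutex conversion; the calls map one-for-one:

	init_MUTEX(&u->readsem);	/* becomes */	mutex_init(&u->readlock);
	down(&u->readsem);		/* becomes */	mutex_lock(&u->readlock);
	up(&u->readsem);		/* becomes */	mutex_unlock(&u->readlock);

A struct mutex is lighter than a semaphore used as a binary mutex and, unlike a semaphore, is covered by lockdep's debugging checks.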
goto out;
addr_len = err;
- down(&u->readsem);
+ mutex_lock(&u->readlock);
err = -EINVAL;
if (u->addr)
out_unlock:
spin_unlock(&unix_table_lock);
out_up:
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
out:
return err;
goto out_unlock;
}
- unix_state_wlock(sk);
+ unix_state_wlock_nested(sk);
if (sk->sk_state != st) {
unix_state_wunlock(sk);
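unix_state_wlock_nested() exists so that connect() can legitimately hold the state locks of two sockets at once. With the state lock being the u->lock spinlock initialized above, its assumed definition in af_unix.h is roughly:

	#define unix_state_wlock_nested(s) \
		spin_lock_nested(&unix_sk(s)->lock, SINGLE_DEPTH_NESTING)

SINGLE_DEPTH_NESTING tells lockdep that taking a second lock of the same class, one level deep, is intentional rather than a deadlock.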
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
if (siocb->scm->fp)
unix_attach_fds(siocb->scm, skb);
+ unix_get_secdata(siocb->scm, skb);
skb->h.raw = skb->data;
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
while(sent < len)
{
/*
- * Optimisation for the fact that under 0.01% of X messages typically
- * need breaking up.
+ * Optimisation for the fact that under 0.01% of X
+ * messages typically need breaking up.
*/
- size=len-sent;
+ size = len-sent;
/* Keep two messages in the pipe so it schedules better */
- if (size > sk->sk_sndbuf / 2 - 64)
- size = sk->sk_sndbuf / 2 - 64;
+ if (size > ((sk->sk_sndbuf >> 1) - 64))
+ size = (sk->sk_sndbuf >> 1) - 64;
if (size > SKB_MAX_ALLOC)
size = SKB_MAX_ALLOC;
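To put numbers on the cap, with a hypothetical sk_sndbuf of 65536 bytes:

	(65536 >> 1) - 64 = 32704

so each chunk is just under half the send buffer, leaving room for two in-flight messages (plus skb overhead), which is exactly what the comment above is after. For the positive values sk_sndbuf takes, the shift form is equivalent to the old / 2 arithmetic.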
msg->msg_namelen = 0;
- down(&u->readsem);
+ mutex_lock(&u->readlock);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
siocb->scm->creds = *UNIXCREDS(skb);
+ unix_set_secdata(siocb->scm, skb);
if (!(flags & MSG_PEEK))
{
out_free:
skb_free_datagram(sk,skb);
out_unlock:
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
out:
return err;
}
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
- down(&u->readsem);
+ mutex_lock(&u->readlock);
do
{
err = -EAGAIN;
if (!timeo)
break;
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo);
err = sock_intr_errno(timeo);
goto out;
}
- down(&u->readsem);
+ mutex_lock(&u->readlock);
continue;
}
}
} while (size);
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
scm_recv(sock, msg, siocb->scm, flags);
out:
return copied ? : err;
mask |= POLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP;
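POLLRDHUP (and its epoll twin EPOLLRDHUP) lets callers learn that the peer shut down its write side without having to read() a zero. A hedged userspace sketch, where epfd and fd are the caller's epoll instance and connected socket:

	#include <sys/epoll.h>

	static int watch_rdhup(int epfd, int fd)
	{
		struct epoll_event ev = {
			.events	 = EPOLLIN | EPOLLRDHUP,
			.data.fd = fd,
		};
		/* a later epoll_wait() reports EPOLLRDHUP on peer half-close */
		return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
	}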
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue) ||
int rc = -1;
struct sk_buff *dummy_skb;
- if (sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)) {
- printk(KERN_CRIT "%s: panic\n", __FUNCTION__);
- goto out;
- }
+ BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
rc = proto_register(&unix_proto, 1);
if (rc != 0) {
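BUILD_BUG_ON() turns the old runtime printk into a compile-time failure; in the kernel.h of this era it is roughly:

	#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))

If unix_skb_parms ever outgrows skb->cb, the char array gets a negative size and the build breaks, instead of the old code limping through init with a KERN_CRIT message.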