DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
struct hlist_head udp_hash[UDP_HTABLE_SIZE];
-rwlock_t udp_hash_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(udp_hash_lock);
/* Shared by v4/v6 udp. */
int udp_port_rover;
{
struct hlist_node *node;
struct sock *sk2;
- struct inet_opt *inet = inet_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
write_lock_bh(&udp_hash_lock);
if (snum == 0) {
} else {
sk_for_each(sk2, node,
&udp_hash[snum & (UDP_HTABLE_SIZE - 1)]) {
- struct inet_opt *inet2 = inet_sk(sk2);
+ struct inet_sock *inet2 = inet_sk(sk2);
if (inet2->num == snum &&
sk2 != sk && !ipv6_only_sock(sk2) &&
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
-struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
+static struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport,
+ u32 daddr, u16 dport, int dif)
{
struct sock *sk, *result = NULL;
struct hlist_node *node;
int badness = -1;
sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) {
- struct inet_opt *inet = inet_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
if (inet->num == hnum && !ipv6_only_sock(sk)) {
int score = (sk->sk_family == PF_INET ? 1 : 0);
return result;
}
-__inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
+static __inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport,
+ u32 daddr, u16 dport, int dif)
{
struct sock *sk;
unsigned short hnum = ntohs(loc_port);
sk_for_each_from(s, node) {
- struct inet_opt *inet = inet_sk(s);
+ struct inet_sock *inet = inet_sk(s);
if (inet->num != hnum ||
(inet->daddr && inet->daddr != rmt_addr) ||
void udp_err(struct sk_buff *skb, u32 info)
{
- struct inet_opt *inet;
+ struct inet_sock *inet;
struct iphdr *iph = (struct iphdr*)skb->data;
struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
int type = skb->h.icmph->type;
*/
static void udp_flush_pending_frames(struct sock *sk)
{
- struct udp_opt *up = udp_sk(sk);
+ struct udp_sock *up = udp_sk(sk);
if (up->pending) {
up->len = 0;
/*
* Push out all pending data as one UDP datagram. Socket is locked.
*/
-static int udp_push_pending_frames(struct sock *sk, struct udp_opt *up)
+static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up)
{
- struct inet_opt *inet = inet_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
struct flowi *fl = &inet->cork.fl;
struct sk_buff *skb;
struct udphdr *uh;
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
- struct inet_opt *inet = inet_sk(sk);
- struct udp_opt *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+ struct udp_sock *up = udp_sk(sk);
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
- return -EINVAL;
+ return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
goto out;
}
-int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags)
+static int udp_sendpage(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
{
- struct udp_opt *up = udp_sk(sk);
+ struct udp_sock *up = udp_sk(sk);
int ret;
if (!up->pending) {
* return it, otherwise we block.
*/
-int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len)
+static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len)
{
- struct inet_opt *inet = inet_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
int copied, err;
int udp_disconnect(struct sock *sk, int flags)
{
- struct inet_opt *inet = inet_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
/*
* 1003.1g - break association.
*/
#ifndef CONFIG_XFRM
return 1;
#else
- struct udp_opt *up = udp_sk(sk);
+ struct udp_sock *up = udp_sk(sk);
struct udphdr *uh = skb->h.uh;
struct iphdr *iph;
int iphlen, len;
*/
static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
- struct udp_opt *up = udp_sk(sk);
+ struct udp_sock *up = udp_sk(sk);
/*
* Charge it to the socket, dropping if the queue is full.
static int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
{
- struct udp_opt *up = udp_sk(sk);
+ struct udp_sock *up = udp_sk(sk);
int val;
int err = 0;
static int udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
- struct udp_opt *up = udp_sk(sk);
+ struct udp_sock *up = udp_sk(sk);
int val, len;
if (level != SOL_UDP)
return 0;
}
+/**
+ *	udp_poll - wait for a UDP event.
+ *	@file: file struct
+ *	@sock: socket
+ *	@wait: poll table
+ *
+ *	This is the same as datagram_poll(), except for the special case of
+ *	blocking sockets.  If the application is using a blocking fd
+ *	and a packet with a checksum error is in the queue,
+ *	select/poll would report the socket readable, but the subsequent
+ *	read would block (the bad packet is dropped at receive time).
+ *	Work around such arguably broken applications by dropping
+ *	checksum-failed packets here, before reporting readability.
+ */
+unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+	unsigned int mask = datagram_poll(file, sock, wait);
+	struct sock *sk = sock->sk;
+
+	/* Check for false positives due to checksum errors.
+	 * Only needed for blocking fds that are readable and not shut down;
+	 * non-blocking readers just get -EAGAIN and retry. */
+	if ( (mask & POLLRDNORM) &&
+	    !(file->f_flags & O_NONBLOCK) &&
+	    !(sk->sk_shutdown & RCV_SHUTDOWN)){
+		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
+		struct sk_buff *skb;
+
+		/* NOTE(review): irq-disabling lock taken here — presumably the
+		 * receive path can queue from (soft)irq context; confirm. */
+		spin_lock_irq(&rcvq->lock);
+		while ((skb = skb_peek(rcvq)) != NULL) {
+			if (udp_checksum_complete(skb)) {
+				/* Bad checksum: count it and drop it from the queue. */
+				UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+				__skb_unlink(skb, rcvq);
+				kfree_skb(skb);
+			} else {
+				/* First good packet: mark it verified so the
+				 * reader need not re-checksum, and stop. */
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				break;
+			}
+		}
+		spin_unlock_irq(&rcvq->lock);
+
+		/* nothing to see, move along */
+		if (skb == NULL)
+			mask &= ~(POLLIN | POLLRDNORM);
+	}
+
+	return mask;
+
+}
struct proto udp_prot = {
.name = "UDP",
+ .owner = THIS_MODULE,
.close = udp_close,
.connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
sk_for_each(sk, node, &udp_hash[state->bucket]) {
if (sk->sk_family == state->family &&
- vx_check(sk->sk_xid, VX_WATCH|VX_IDENT))
+ vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
goto found;
}
}
try_again:
;
} while (sk && (sk->sk_family != state->family ||
- !vx_check(sk->sk_xid, VX_WATCH|VX_IDENT)));
+ !vx_check(sk->sk_xid, VX_IDENT|VX_WATCH)));
if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
sk = sk_head(&udp_hash[state->bucket]);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
- struct inet_opt *inet = inet_sk(sp);
+ struct inet_sock *inet = inet_sk(sp);
unsigned int dest = inet->daddr;
unsigned int src = inet->rcv_saddr;
__u16 destp = ntohs(inet->dport);
EXPORT_SYMBOL(udp_port_rover);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
+EXPORT_SYMBOL(udp_poll);
#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);