/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/config.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/snmp.h>
#include <net/tcp.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
/*
 *	Snmp MIB for the UDP layer
 */

DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);
/* Shared by v4/v6 udp. */
int udp_port_rover;

static inline int udp_lport_inuse(u16 num)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &udp_hash[num & (UDP_HTABLE_SIZE - 1)])
		if (inet_sk(sk)->num == num)
			return 1;
	return 0;
}

static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	struct hlist_node *node;
	struct sock *sk2;
	struct inet_sock *inet = inet_sk(sk);

	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct hlist_head *list;
			int size;

			list = &udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(list)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			sk_for_each(sk2, node, list)
				if (++size >= best_size_so_far)
					goto next;
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		udp_port_rover = snum = result;
	} else {
		sk_for_each(sk2, node,
			    &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]) {
			struct inet_sock *inet2 = inet_sk(sk2);

			if (inet2->num == snum &&
			    sk2 != sk && !ipv6_only_sock(sk2) &&
			    (!sk2->sk_bound_dev_if ||
			     !sk->sk_bound_dev_if ||
			     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
			    nx_addr_conflict(sk->sk_nx_info,
			    tcp_v4_rcv_saddr(sk), sk2) &&
			    (!sk2->sk_reuse || !sk->sk_reuse))
				goto fail;
		}
	}
	inet->num = snum;
	if (sk_unhashed(sk)) {
		struct hlist_head *h = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];

		sk_add_node(sk, h);
		sock_prot_inc_use(sk->sk_prot);
	}
	write_unlock_bh(&udp_hash_lock);
	return 0;

fail:
	write_unlock_bh(&udp_hash_lock);
	return 1;
}
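/*
 * Example (illustrative userspace sketch, not part of this file): binding
 * with sin_port == 0 takes the snum == 0 branch above, which picks an
 * ephemeral port from sysctl_local_port_range starting at udp_port_rover
 * and prefers sparsely populated hash chains:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in a = { .sin_family = AF_INET };	// sin_port == 0
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));	// kernel picks the port
 *
 * A non-zero sin_port takes the conflict-scan branch instead.
 */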
static void udp_v4_hash(struct sock *sk)
{
	BUG();
}

static void udp_v4_unhash(struct sock *sk)
{
	write_lock_bh(&udp_hash_lock);
	if (sk_del_node_init(sk)) {
		inet_sk(sk)->num = 0;
		sock_prot_dec_use(sk->sk_prot);
	}
	write_unlock_bh(&udp_hash_lock);
}
static inline int udp_in_list(struct nx_info *nx_info, u32 addr)
{
	int n = nx_info->nbipv4;
	int i;

	for (i = 0; i < n; i++)
		if (nx_info->ipv4[i] == addr)
			return 1;
	return 0;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
static struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport,
					  u32 daddr, u16 dport, int dif)
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (inet->num == hnum && !ipv6_only_sock(sk)) {
			int score = (sk->sk_family == PF_INET ? 1 : 0);
			if (inet->rcv_saddr) {
				if (inet->rcv_saddr != daddr)
					continue;
				score += 2;
			} else if (sk->sk_nx_info) {
				if (udp_in_list(sk->sk_nx_info, daddr))
					score += 2;
				else
					continue;
			}
			if (inet->daddr) {
				if (inet->daddr != saddr)
					continue;
				score += 2;
			}
			if (inet->dport) {
				if (inet->dport != sport)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 9) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	return result;
}

static __inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport,
					     u32 daddr, u16 dport, int dif)
{
	struct sock *sk;

	read_lock(&udp_hash_lock);
	sk = udp_v4_lookup_longway(saddr, sport, daddr, dport, dif);
	if (sk)
		sock_hold(sk);
	read_unlock(&udp_hash_lock);
	return sk;
}
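/*
 * The longway lookup scores each candidate: 1 for an AF_INET family
 * match plus 2 for each exact match on local address, remote address,
 * remote port and bound device.  A perfect score of 9 ends the walk
 * early; otherwise the highest-scoring socket wins.  For example
 * (hypothetical addresses): given a wildcard listener on *:53 (score 1)
 * and a socket connected from 192.0.2.1:53 to 198.51.100.1:9999
 * (score 7), a reply from 198.51.100.1 is delivered to the latter.
 */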
static inline struct sock *udp_v4_mcast_next(struct sock *sk,
					     u16 loc_port, u32 loc_addr,
					     u16 rmt_port, u32 rmt_addr,
					     int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (inet->num != hnum					||
		    (inet->daddr && inet->daddr != rmt_addr)		||
		    (inet->dport != rmt_port && inet->dport)		||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr &&
		     inet->rcv_saddr2 && inet->rcv_saddr2 != loc_addr)	||
		    ipv6_only_sock(s)					||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void udp_err(struct sk_buff *skb, u32 info)
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr*)skb->data;
	struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	int harderr;
	int err;

	sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}
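/*
 * Example (illustrative userspace sketch): an application that wants the
 * queued ICMP errors delivered by ip_icmp_error() above must opt in with
 * IP_RECVERR and drain them via MSG_ERRQUEUE (see udp_recvmsg() below):
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 *	...
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);  // cmsg carries sock_extended_err
 */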
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up)
{
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = skb->h.uh;
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	}

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		if (skb->ip_summed == CHECKSUM_HW) {
			skb->csum = offsetof(struct udphdr, check);
			uh->check = ~csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
					up->len, IPPROTO_UDP, 0);
		} else {
			skb->csum = csum_partial((char *)uh,
					sizeof(struct udphdr), skb->csum);
			uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
					up->len, IPPROTO_UDP, skb->csum);
			if (uh->check == 0)
				uh->check = -1;
		}
	} else {
		unsigned int csum = 0;
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together.
		 */
		if (skb->ip_summed == CHECKSUM_HW) {
			int offset = (unsigned char *)uh - skb->data;
			skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

			skb->ip_summed = CHECKSUM_NONE;
		} else {
			skb->csum = csum_partial((char *)uh,
					sizeof(struct udphdr), skb->csum);
		}

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}
		uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
				up->len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = -1;
	}
send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}
static unsigned short udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr, unsigned long base)
{
	return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base));
}
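/*
 * Note on the arithmetic above: per RFC 768 the UDP checksum covers a
 * pseudo-header (source address, destination address, protocol and UDP
 * length) plus the UDP header and payload.  A checksum that computes to
 * zero must be transmitted as all ones, since zero on the wire means
 * "sender did not checksum"; that is why the transmit paths above
 * substitute -1.
 */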
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	u32 daddr, faddr, saddr;
	u16 dport;
	u8  tos;
	int err;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags&MSG_OOB)	/* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable*)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = IPPROTO_UDP,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		struct nx_info *nxi = sk->sk_nx_info;

		if (nxi) {
			err = ip_find_src(nxi, &rt, &fl);
			if (err)
				goto out;
			if (daddr == IPI_LOOPBACK && !vx_check(0, VX_ADMIN))
				daddr = fl.fl4_dst = nxi->ipv4[0];
		}
		err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
		if (err)
			goto out;

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 2\n"));
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
			sizeof(struct udphdr), &ipc, rt,
			corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk, up);
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
		return len;
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
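/*
 * Example (illustrative userspace sketch): the corking logic above lets
 * an application accumulate several writes into one datagram, either
 * per call with MSG_MORE or socket-wide with UDP_CORK:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdrlen, 0);	// queued; up->pending = AF_INET
 *	send(fd, body, bodylen, 0);	// appended to the same datagram
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));  // push
 */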
static int udp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 3\n"));
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk, up);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
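/*
 * Note: SIOCINQ (aka FIONREAD) reports only the payload length of the
 * first queued datagram, not the total bytes queued on the socket.
 * Example (illustrative):
 *
 *	int n;
 *	ioctl(fd, SIOCINQ, &n);	// next datagram's payload size, or 0
 */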
static __inline__ int __udp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

static __inline__ int udp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__udp_checksum_complete(skb);
}
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */

static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	int copied, err;

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len - sizeof(struct udphdr);
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if (__udp_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = skb->h.uh->source;
		sin->sin_addr.s_addr = skb->nh.iph->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len - sizeof(struct udphdr);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS);

	/* Clear queue. */
	if (flags&MSG_PEEK) {
		int clear = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			clear = 1;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	skb_free_datagram(sk, skb);

	return -EAGAIN;
}
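/*
 * Example (illustrative userspace sketch): because err is set to the
 * full payload length when MSG_TRUNC is passed, a caller can detect
 * datagrams larger than its buffer:
 *
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	if (n > sizeof(buf))
 *		;	// datagram was truncated to sizeof(buf) bytes
 */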
int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
static void udp_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}
/* return:
 *	1  if the UDP system should process it
 *	0  if we should drop this packet
 *	-1 if it should get processed by xfrm4_rcv_encap
 */
static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
{
#ifndef CONFIG_XFRM
	return 1;
#else
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh = skb->h.uh;
	struct iphdr *iph;
	int iphlen, len;

	__u8 *udpdata = (__u8 *)uh + sizeof(struct udphdr);
	__u32 *udpdata32 = (__u32 *)udpdata;
	__u16 encap_type = up->encap_type;

	/* if we're overly short, let UDP handle it */
	if (udpdata > skb->tail)
		return 1;

	/* if this is not an encapsulation socket, then just return now */
	if (!encap_type)
		return 1;

	len = skb->tail - udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {

			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return 0;

	/* Now we can update and verify the packet length... */
	iph = skb->nh.iph;
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		return 0;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	skb->h.raw = skb_pull(skb, len);

	/* modify the protocol (it's ESP!) */
	iph->protocol = IPPROTO_ESP;

	/* and let the caller know to send this into the ESP processor... */
	return -1;
#endif
}
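/*
 * For reference, the NAT-traversal framings the switch above
 * distinguishes (per RFC 3948 and the earlier non-IKE draft):
 *
 *	keepalive:                  UDP hdr | 0xff                  eaten
 *	IKE:                        UDP hdr | zero marker | ...     to UDP
 *	UDP_ENCAP_ESPINUDP:         UDP hdr | ESP hdr (SPI != 0)    to xfrm
 *	UDP_ENCAP_ESPINUDP_NON_IKE: UDP hdr | 0x0 0x0 | ESP hdr     to xfrm
 */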
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		kfree_skb(skb);
		return -1;
	}

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket, so let's see if this is
		 * an encapsulated packet.
		 * If it's a keepalive packet, then just eat it.
		 * If it's an encapsulated packet, then pass it to the
		 * IPsec xfrm input and return the response
		 * appropriately.  Otherwise, just fall through and
		 * pass this up the UDP socket.
		 */
		int ret;

		ret = udp_encap_rcv(sk, skb);
		if (ret == 0) {
			/* Eat the packet .. */
			kfree_skb(skb);
			return 0;
		}
		if (ret < 0) {
			/* process the ESP packet */
			ret = xfrm4_rcv_encap(skb, up->encap_type);
			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
			return -ret;
		}
		/* FALLTHROUGH -- it's a UDP Packet */
	}

	if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (__udp_checksum_complete(skb)) {
			UDP_INC_STATS_BH(UDP_MIB_INERRORS);
			kfree_skb(skb);
			return -1;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (sock_queue_rcv_skb(sk, skb) < 0) {
		UDP_INC_STATS_BH(UDP_MIB_INERRORS);
		kfree_skb(skb);
		return -1;
	}
	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
	return 0;
}
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
				u32 saddr, u32 daddr)
{
	struct sock *sk;
	int dif;

	read_lock(&udp_hash_lock);
	sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
	if (sk) {
		struct sock *sknext = NULL;

		do {
			struct sk_buff *skb1 = skb;

			sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
						   uh->source, saddr, dif);
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = udp_queue_rcv_skb(sk, skb1);
				if (ret > 0)
					/* we should probably re-process instead
					 * of dropping packets here. */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sknext);
	} else
		kfree_skb(skb);
	read_unlock(&udp_hash_lock);
	return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static int udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
			     unsigned short ulen, u32 saddr, u32 daddr)
{
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_HW) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
			return 0;
		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp v4 hw csum failure.\n"));
		skb->ip_summed = CHECKSUM_NONE;
	}
	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
		skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */
	return 0;
}
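/*
 * Note: a received checksum of zero means the sender did not compute
 * one, which is legal for UDP over IPv4, so verification is skipped.
 * Otherwise verification is deferred: only the pseudo-header sum is
 * seeded into skb->csum here, and __udp_checksum_complete() (or the
 * copy-and-checksum path in udp_recvmsg) folds in the body later.
 */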
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int udp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = (struct rtable*)skb->dst;
	u32 saddr = skb->nh.iph->saddr;
	u32 daddr = skb->nh.iph->daddr;
	int len = skb->len;

	/*
	 *	Validate the packet and the UDP length.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto no_header;

	uh = skb->h.uh;

	ulen = ntohs(uh->len);

	if (ulen > len || ulen < sizeof(*uh))
		goto short_packet;

	if (pskb_trim(skb, ulen))
		goto short_packet;

	if (udp_checksum_init(skb, uh, ulen, saddr, daddr) < 0)
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return udp_v4_mcast_deliver(skb, uh, saddr, daddr);

	sk = udp_v4_lookup(saddr, uh->source, daddr, uh->dest, skb->dev->ifindex);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(UDP_MIB_NOPORTS);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	NETDEBUG(if (net_ratelimit())
		 printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
			NIPQUAD(saddr),
			ntohs(uh->source),
			ulen,
			len,
			NIPQUAD(daddr),
			ntohs(uh->dest)));
no_header:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
	kfree_skb(skb);
	return 0;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	NETDEBUG(if (net_ratelimit())
		 printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
			NIPQUAD(saddr),
			ntohs(uh->source),
			NIPQUAD(daddr),
			ntohs(uh->dest),
			ulen));
drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
	kfree_skb(skb);
	return 0;
}
static int udp_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_flush_pending_frames(sk);
	release_sock(sk);
	return 0;
}
/*
 *	Socket option code for UDP
 */
static int udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;

	if (level != SOL_UDP)
		return ip_setsockopt(sk, level, optname, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			udp_push_pending_frames(sk, up);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	};

	return err;
}
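/*
 * Example (illustrative userspace sketch): an IKE daemon typically
 * enables ESP-in-UDP decapsulation on its port 4500 socket like this:
 *
 *	int t = UDP_ENCAP_ESPINUDP;	// RFC 3948 framing
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &t, sizeof(t));
 */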
static int udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (level != SOL_UDP)
		return ip_getsockopt(sk, level, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
/**
 *	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	then it could get a return from select indicating data available
 *	but then block when reading it.  Add special case code
 *	to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) &&
	    !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_bh(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL) {
			if (udp_checksum_complete(skb)) {
				UDP_INC_STATS_BH(UDP_MIB_INERRORS);
				__skb_unlink(skb, rcvq);
				kfree_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}
		spin_unlock_bh(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;
}
struct proto udp_prot = {
	.name =		"UDP",
	.owner =	THIS_MODULE,
	.close =	udp_close,
	.connect =	ip4_datagram_connect,
	.disconnect =	udp_disconnect,
	.ioctl =	udp_ioctl,
	.destroy =	udp_destroy_sock,
	.setsockopt =	udp_setsockopt,
	.getsockopt =	udp_getsockopt,
	.sendmsg =	udp_sendmsg,
	.recvmsg =	udp_recvmsg,
	.sendpage =	udp_sendpage,
	.backlog_rcv =	udp_queue_rcv_skb,
	.hash =		udp_v4_hash,
	.unhash =	udp_v4_unhash,
	.get_port =	udp_v4_get_port,
	.obj_size =	sizeof(struct udp_sock),
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;

		sk_for_each(sk, node, &udp_hash[state->bucket]) {
			if (sk->sk_family == state->family &&
			    vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && (sk->sk_family != state->family ||
		 !vx_check(sk->sk_xid, VX_IDENT|VX_WATCH)));

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(&udp_hash[state->bucket]);
		goto try_again;
	}
	return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos-1) : (void *)1;
}
static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == (void *)1)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
static void udp_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&udp_hash_lock);
}
static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	memset(s, 0, sizeof(*s));
	s->family		= afinfo->family;
	s->seq_ops.start	= udp_seq_start;
	s->seq_ops.next		= udp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;

	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= udp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;
	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}
void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src  = inet->rcv_saddr;
	__u16 destp	  = ntohs(inet->dport);
	__u16 srcp	  = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
}
static int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
	else {
		char tmpbuf[129];
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);
	}
	return 0;
}
/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp",
	.family		= AF_INET,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,
};

int __init udp4_proc_init(void)
{
	return udp_proc_register(&udp4_seq_afinfo);
}

void udp4_proc_exit(void)
{
	udp_proc_unregister(&udp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_port_rover);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_poll);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif